author | Tejun Heo <tj@kernel.org> | 2014-06-17 19:12:39 -0400
---|---|---
committer | Tejun Heo <tj@kernel.org> | 2014-06-17 19:12:39 -0400
commit | a32f8d8eda8bd49017ac5f88e2b859f1f582557f (patch) |
tree | 8ef931b5060b802d41c8cafe8356b5d155a5f8d8 |
parent | 47b69ad673d9aa53c1d6032a6a522fc0ce8d6fc1 (diff) |
percpu: move {raw|this}_cpu_*() definitions to include/linux/percpu-defs.h
We're in the process of moving all percpu accessors and operations to
include/linux/percpu-defs.h so that they're available to arch headers
without having to include the full include/linux/percpu.h, which may
cause cyclic inclusion dependencies.
This patch moves {raw|this}_cpu_*() definitions from
include/linux/percpu.h to include/linux/percpu-defs.h. The code is
moved mostly verbatim; however, raw_cpu_*() are placed above
this_cpu_*(), which is more conventional, as the raw operations may be
used to define other variants.
This is pure reorganization.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
-rw-r--r-- | include/linux/percpu-defs.h | 209
-rw-r--r-- | include/linux/percpu.h | 208
2 files changed, 209 insertions(+), 208 deletions(-)
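To make the motivation concrete: an arch header that needs a percpu accessor cannot pull in the full include/linux/percpu.h without risking an include cycle. A minimal sketch, modeled loosely on x86's asm/preempt.h; the file and variable names are illustrative, not taken from the patch:

```c
/*
 * arch/foo/include/asm/preempt.h -- hypothetical arch header.
 * Including <linux/percpu.h> here would drag in headers that
 * (indirectly) include this file again, creating a cycle.
 * <linux/percpu-defs.h> carries no such baggage, so once the
 * accessors live there the header can use them safely:
 */
#include <linux/percpu-defs.h>

DECLARE_PER_CPU(int, __preempt_count);

static __always_inline int preempt_count(void)
{
	return raw_cpu_read(__preempt_count);	/* raw: no debug check */
}
```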
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 94cd90afadac..6710eb9555fa 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -270,5 +270,214 @@
270 | preempt_enable(); \ | 270 | preempt_enable(); \ |
271 | } while (0) | 271 | } while (0) |
272 | 272 | ||
273 | /* | ||
274 | * Branching function to split up a function into a set of functions that | ||
275 | * are called for different scalar sizes of the objects handled. | ||
276 | */ | ||
277 | |||
278 | extern void __bad_size_call_parameter(void); | ||
279 | |||
280 | #ifdef CONFIG_DEBUG_PREEMPT | ||
281 | extern void __this_cpu_preempt_check(const char *op); | ||
282 | #else | ||
283 | static inline void __this_cpu_preempt_check(const char *op) { } | ||
284 | #endif | ||
285 | |||
286 | #define __pcpu_size_call_return(stem, variable) \ | ||
287 | ({ typeof(variable) pscr_ret__; \ | ||
288 | __verify_pcpu_ptr(&(variable)); \ | ||
289 | switch(sizeof(variable)) { \ | ||
290 | case 1: pscr_ret__ = stem##1(variable);break; \ | ||
291 | case 2: pscr_ret__ = stem##2(variable);break; \ | ||
292 | case 4: pscr_ret__ = stem##4(variable);break; \ | ||
293 | case 8: pscr_ret__ = stem##8(variable);break; \ | ||
294 | default: \ | ||
295 | __bad_size_call_parameter();break; \ | ||
296 | } \ | ||
297 | pscr_ret__; \ | ||
298 | }) | ||
299 | |||
300 | #define __pcpu_size_call_return2(stem, variable, ...) \ | ||
301 | ({ \ | ||
302 | typeof(variable) pscr2_ret__; \ | ||
303 | __verify_pcpu_ptr(&(variable)); \ | ||
304 | switch(sizeof(variable)) { \ | ||
305 | case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ | ||
306 | case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ | ||
307 | case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ | ||
308 | case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ | ||
309 | default: \ | ||
310 | __bad_size_call_parameter(); break; \ | ||
311 | } \ | ||
312 | pscr2_ret__; \ | ||
313 | }) | ||
314 | |||
315 | /* | ||
316 | * Special handling for cmpxchg_double. cmpxchg_double is passed two | ||
317 | * percpu variables. The first has to be aligned to a double word | ||
318 | * boundary and the second has to follow directly thereafter. | ||
319 | * We enforce this on all architectures even if they don't support | ||
320 | * a double cmpxchg instruction, since it's a cheap requirement, and it | ||
321 | * avoids breaking the requirement for architectures with the instruction. | ||
322 | */ | ||
323 | #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ | ||
324 | ({ \ | ||
325 | bool pdcrb_ret__; \ | ||
326 | __verify_pcpu_ptr(&pcp1); \ | ||
327 | BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ | ||
328 | VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \ | ||
329 | VM_BUG_ON((unsigned long)(&pcp2) != \ | ||
330 | (unsigned long)(&pcp1) + sizeof(pcp1)); \ | ||
331 | switch(sizeof(pcp1)) { \ | ||
332 | case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ | ||
333 | case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ | ||
334 | case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ | ||
335 | case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ | ||
336 | default: \ | ||
337 | __bad_size_call_parameter(); break; \ | ||
338 | } \ | ||
339 | pdcrb_ret__; \ | ||
340 | }) | ||
341 | |||
342 | #define __pcpu_size_call(stem, variable, ...) \ | ||
343 | do { \ | ||
344 | __verify_pcpu_ptr(&(variable)); \ | ||
345 | switch(sizeof(variable)) { \ | ||
346 | case 1: stem##1(variable, __VA_ARGS__);break; \ | ||
347 | case 2: stem##2(variable, __VA_ARGS__);break; \ | ||
348 | case 4: stem##4(variable, __VA_ARGS__);break; \ | ||
349 | case 8: stem##8(variable, __VA_ARGS__);break; \ | ||
350 | default: \ | ||
351 | __bad_size_call_parameter();break; \ | ||
352 | } \ | ||
353 | } while (0) | ||
354 | |||
355 | /* | ||
356 | * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> | ||
357 | * | ||
358 | * Optimized manipulation for memory allocated through the per cpu | ||
359 | * allocator or for addresses of per cpu variables. | ||
360 | * | ||
361 | * These operations guarantee exclusivity of access against other operations | ||
362 | * on the *same* processor. The assumption is that per cpu data is only | ||
363 | * accessed by a single processor instance (the current one). | ||
364 | * | ||
365 | * The arch code can provide optimized implementations by defining macros | ||
366 | * for certain scalar sizes. E.g. define this_cpu_add_2() to provide per | ||
367 | * cpu atomic operations for 2 byte sized RMW actions. If arch code does | ||
368 | * not provide operations for a scalar size then the fallback in the | ||
369 | * generic code will be used. | ||
370 | */ | ||
371 | |||
372 | /* | ||
373 | * Generic percpu operations for contexts where we do not want to do | ||
374 | * any checks for preemption. | ||
375 | * | ||
376 | * If there is no other protection through preempt disable and/or | ||
377 | * disabling interrupts then one of these RMW operations can show unexpected | ||
378 | * behavior because the execution thread was rescheduled on another processor | ||
379 | * or an interrupt occurred and the same percpu variable was modified from | ||
380 | * the interrupt context. | ||
381 | */ | ||
382 | # define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp)) | ||
383 | # define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val)) | ||
384 | # define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val)) | ||
385 | # define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val)) | ||
386 | # define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1) | ||
387 | # define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1) | ||
388 | # define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val)) | ||
389 | # define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val)) | ||
390 | # define raw_cpu_add_return(pcp, val) \ | ||
391 | __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) | ||
392 | #define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
393 | #define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) | ||
394 | #define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) | ||
395 | # define raw_cpu_xchg(pcp, nval) \ | ||
396 | __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval) | ||
397 | # define raw_cpu_cmpxchg(pcp, oval, nval) \ | ||
398 | __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) | ||
399 | # define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
400 | __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | ||
401 | |||
402 | /* | ||
403 | * Generic percpu operations for contexts that are safe from preemption/interrupts. | ||
404 | */ | ||
405 | # define __this_cpu_read(pcp) \ | ||
406 | (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp))) | ||
407 | |||
408 | # define __this_cpu_write(pcp, val) \ | ||
409 | do { __this_cpu_preempt_check("write"); \ | ||
410 | __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \ | ||
411 | } while (0) | ||
412 | |||
413 | # define __this_cpu_add(pcp, val) \ | ||
414 | do { __this_cpu_preempt_check("add"); \ | ||
415 | __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \ | ||
416 | } while (0) | ||
417 | |||
418 | # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) | ||
419 | # define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) | ||
420 | # define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) | ||
421 | |||
422 | # define __this_cpu_and(pcp, val) \ | ||
423 | do { __this_cpu_preempt_check("and"); \ | ||
424 | __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \ | ||
425 | } while (0) | ||
426 | |||
427 | # define __this_cpu_or(pcp, val) \ | ||
428 | do { __this_cpu_preempt_check("or"); \ | ||
429 | __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \ | ||
430 | } while (0) | ||
431 | |||
432 | # define __this_cpu_add_return(pcp, val) \ | ||
433 | (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)) | ||
434 | |||
435 | #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
436 | #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) | ||
437 | #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) | ||
438 | |||
439 | # define __this_cpu_xchg(pcp, nval) \ | ||
440 | (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)) | ||
441 | |||
442 | # define __this_cpu_cmpxchg(pcp, oval, nval) \ | ||
443 | (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)) | ||
444 | |||
445 | # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
446 | (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))) | ||
447 | |||
448 | /* | ||
449 | * this_cpu_*() operations are used for accesses that must be done in a | ||
450 | * preemption-safe way since we know that the context is not preempt | ||
451 | * safe. Interrupts may occur. If an interrupt modifies the variable too | ||
452 | * then RMW actions will not be reliable. | ||
453 | */ | ||
454 | # define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) | ||
455 | # define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) | ||
456 | # define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) | ||
457 | # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) | ||
458 | # define this_cpu_inc(pcp) this_cpu_add((pcp), 1) | ||
459 | # define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) | ||
460 | # define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) | ||
461 | # define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) | ||
462 | # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) | ||
463 | #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
464 | #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) | ||
465 | #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) | ||
466 | # define this_cpu_xchg(pcp, nval) \ | ||
467 | __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval) | ||
468 | # define this_cpu_cmpxchg(pcp, oval, nval) \ | ||
469 | __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) | ||
470 | |||
471 | /* | ||
472 | * cmpxchg_double replaces two adjacent scalars at once. The first | ||
473 | * two parameters are per cpu variables which have to be of the same | ||
474 | * size. A truth value is returned to indicate success or failure | ||
475 | * (since a double register result is difficult to handle). There is | ||
476 | * very limited hardware support for these operations, so only certain | ||
477 | * sizes may work. | ||
478 | */ | ||
479 | # define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
480 | __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | ||
481 | |||
273 | #endif /* __ASSEMBLY__ */ | 482 | #endif /* __ASSEMBLY__ */ |
274 | #endif /* _LINUX_PERCPU_DEFS_H */ | 483 | #endif /* _LINUX_PERCPU_DEFS_H */ |
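Before the removal side of the diff, a few illustrative sketches (mine, not part of the patch). First, the size dispatch: for a 4-byte per-CPU variable, raw_cpu_read() collapses to the sized stem, and an unsupported size turns into a link error because __bad_size_call_parameter() is declared but never defined:

```c
#include <linux/percpu.h>

/* Illustrative variable; raw_cpu_read_4() is supplied by the arch
 * (asm/percpu.h) or by the generic fallbacks. */
DEFINE_PER_CPU(int, nr_calls);

void dispatch_example(void)
{
	/*
	 * raw_cpu_read(nr_calls)
	 *   -> __pcpu_size_call_return(raw_cpu_read_, (nr_calls))
	 *   -> case 4: pscr_ret__ = raw_cpu_read_4(nr_calls);
	 *
	 * For a sizeof() other than 1, 2, 4 or 8, the reference to the
	 * never-defined __bad_size_call_parameter() survives into the
	 * object file and the build fails at link time.
	 */
	int v = raw_cpu_read(nr_calls);
	(void)v;
}
```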
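Second, how the three flavors divide the work; a hedged usage sketch with illustrative names:

```c
#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, nr_events);	/* illustrative */

void from_any_context(void)
{
	/* Preempt-safe regardless of context: the operation itself is
	 * atomic w.r.t. preemption (one instruction or preempt off). */
	this_cpu_inc(nr_events);
}

void preemption_already_off(void)
{
	/* Caller guarantees preemption is disabled; CONFIG_DEBUG_PREEMPT
	 * verifies this via __this_cpu_preempt_check("add"). */
	__this_cpu_inc(nr_events);
}

void no_checks_wanted(void)
{
	/* No protection, no debug check: only correct when rescheduling
	 * and interrupt modification are impossible or tolerable. */
	raw_cpu_inc(nr_events);
}
```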
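Finally, the constraint __pcpu_double_call_return_bool() enforces: the two per-CPU scalars must have equal size, the pair must be aligned to twice that size, and the second member must directly follow the first. A hypothetical pair:

```c
#include <linux/percpu.h>

struct pcpu_pair {
	unsigned long val;
	unsigned long gen;	/* must directly follow 'val' */
} __aligned(2 * sizeof(unsigned long));

DEFINE_PER_CPU(struct pcpu_pair, pair);	/* illustrative */

bool bump_pair(unsigned long old_val, unsigned long old_gen,
	       unsigned long new_val)
{
	/* Replaces both words in one shot; returns true on success. */
	return this_cpu_cmpxchg_double(pair.val, pair.gen,
				       old_val, old_gen,
				       new_val, old_gen + 1);
}
```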
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 20b953532596..6f61b61b7996 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -129,212 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
129 | #define alloc_percpu(type) \ | 129 | #define alloc_percpu(type) \ |
130 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) | 130 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) |
131 | 131 | ||
132 | /* | ||
133 | * Branching function to split up a function into a set of functions that | ||
134 | * are called for different scalar sizes of the objects handled. | ||
135 | */ | ||
136 | |||
137 | extern void __bad_size_call_parameter(void); | ||
138 | |||
139 | #ifdef CONFIG_DEBUG_PREEMPT | ||
140 | extern void __this_cpu_preempt_check(const char *op); | ||
141 | #else | ||
142 | static inline void __this_cpu_preempt_check(const char *op) { } | ||
143 | #endif | ||
144 | |||
145 | #define __pcpu_size_call_return(stem, variable) \ | ||
146 | ({ typeof(variable) pscr_ret__; \ | ||
147 | __verify_pcpu_ptr(&(variable)); \ | ||
148 | switch(sizeof(variable)) { \ | ||
149 | case 1: pscr_ret__ = stem##1(variable);break; \ | ||
150 | case 2: pscr_ret__ = stem##2(variable);break; \ | ||
151 | case 4: pscr_ret__ = stem##4(variable);break; \ | ||
152 | case 8: pscr_ret__ = stem##8(variable);break; \ | ||
153 | default: \ | ||
154 | __bad_size_call_parameter();break; \ | ||
155 | } \ | ||
156 | pscr_ret__; \ | ||
157 | }) | ||
158 | |||
159 | #define __pcpu_size_call_return2(stem, variable, ...) \ | ||
160 | ({ \ | ||
161 | typeof(variable) pscr2_ret__; \ | ||
162 | __verify_pcpu_ptr(&(variable)); \ | ||
163 | switch(sizeof(variable)) { \ | ||
164 | case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ | ||
165 | case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ | ||
166 | case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ | ||
167 | case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ | ||
168 | default: \ | ||
169 | __bad_size_call_parameter(); break; \ | ||
170 | } \ | ||
171 | pscr2_ret__; \ | ||
172 | }) | ||
173 | |||
174 | /* | ||
175 | * Special handling for cmpxchg_double. cmpxchg_double is passed two | ||
176 | * percpu variables. The first has to be aligned to a double word | ||
177 | * boundary and the second has to follow directly thereafter. | ||
178 | * We enforce this on all architectures even if they don't support | ||
179 | * a double cmpxchg instruction, since it's a cheap requirement, and it | ||
180 | * avoids breaking the requirement for architectures with the instruction. | ||
181 | */ | ||
182 | #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ | ||
183 | ({ \ | ||
184 | bool pdcrb_ret__; \ | ||
185 | __verify_pcpu_ptr(&pcp1); \ | ||
186 | BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ | ||
187 | VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \ | ||
188 | VM_BUG_ON((unsigned long)(&pcp2) != \ | ||
189 | (unsigned long)(&pcp1) + sizeof(pcp1)); \ | ||
190 | switch(sizeof(pcp1)) { \ | ||
191 | case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ | ||
192 | case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ | ||
193 | case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ | ||
194 | case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ | ||
195 | default: \ | ||
196 | __bad_size_call_parameter(); break; \ | ||
197 | } \ | ||
198 | pdcrb_ret__; \ | ||
199 | }) | ||
200 | |||
201 | #define __pcpu_size_call(stem, variable, ...) \ | ||
202 | do { \ | ||
203 | __verify_pcpu_ptr(&(variable)); \ | ||
204 | switch(sizeof(variable)) { \ | ||
205 | case 1: stem##1(variable, __VA_ARGS__);break; \ | ||
206 | case 2: stem##2(variable, __VA_ARGS__);break; \ | ||
207 | case 4: stem##4(variable, __VA_ARGS__);break; \ | ||
208 | case 8: stem##8(variable, __VA_ARGS__);break; \ | ||
209 | default: \ | ||
210 | __bad_size_call_parameter();break; \ | ||
211 | } \ | ||
212 | } while (0) | ||
213 | |||
214 | /* | ||
215 | * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> | ||
216 | * | ||
217 | * Optimized manipulation for memory allocated through the per cpu | ||
218 | * allocator or for addresses of per cpu variables. | ||
219 | * | ||
220 | * These operations guarantee exclusivity of access against other operations | ||
221 | * on the *same* processor. The assumption is that per cpu data is only | ||
222 | * accessed by a single processor instance (the current one). | ||
223 | * | ||
224 | * The first group is used for accesses that must be done in a | ||
225 | * preemption-safe way since we know that the context is not preempt | ||
226 | * safe. Interrupts may occur. If an interrupt modifies the variable | ||
227 | * too then RMW actions will not be reliable. | ||
228 | * | ||
229 | * The arch code can provide optimized implementations by defining macros | ||
230 | * for certain scalar sizes. E.g. define this_cpu_add_2() to provide per | ||
231 | * cpu atomic operations for 2 byte sized RMW actions. If arch code does | ||
232 | * not provide operations for a scalar size then the fallback in the | ||
233 | * generic code will be used. | ||
234 | */ | ||
235 | |||
236 | # define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) | ||
237 | # define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) | ||
238 | # define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) | ||
239 | # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) | ||
240 | # define this_cpu_inc(pcp) this_cpu_add((pcp), 1) | ||
241 | # define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) | ||
242 | # define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) | ||
243 | # define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) | ||
244 | # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) | ||
245 | #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
246 | #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) | ||
247 | #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) | ||
248 | # define this_cpu_xchg(pcp, nval) \ | ||
249 | __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval) | ||
250 | # define this_cpu_cmpxchg(pcp, oval, nval) \ | ||
251 | __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) | ||
252 | |||
253 | /* | ||
254 | * cmpxchg_double replaces two adjacent scalars at once. The first | ||
255 | * two parameters are per cpu variables which have to be of the same | ||
256 | * size. A truth value is returned to indicate success or failure | ||
257 | * (since a double register result is difficult to handle). There is | ||
258 | * very limited hardware support for these operations, so only certain | ||
259 | * sizes may work. | ||
260 | */ | ||
261 | # define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
262 | __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | ||
263 | |||
264 | /* | ||
265 | * Generic percpu operations for contexts where we do not want to do | ||
266 | * any checks for preemption. | ||
267 | * | ||
268 | * If there is no other protection through preempt disable and/or | ||
269 | * disabling interrupts then one of these RMW operations can show unexpected | ||
270 | * behavior because the execution thread was rescheduled on another processor | ||
271 | * or an interrupt occurred and the same percpu variable was modified from | ||
272 | * the interrupt context. | ||
273 | */ | ||
274 | # define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp)) | ||
275 | # define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val)) | ||
276 | # define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val)) | ||
277 | # define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val)) | ||
278 | # define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1) | ||
279 | # define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1) | ||
280 | # define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val)) | ||
281 | # define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val)) | ||
282 | # define raw_cpu_add_return(pcp, val) \ | ||
283 | __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) | ||
284 | #define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
285 | #define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) | ||
286 | #define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) | ||
287 | # define raw_cpu_xchg(pcp, nval) \ | ||
288 | __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval) | ||
289 | # define raw_cpu_cmpxchg(pcp, oval, nval) \ | ||
290 | __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) | ||
291 | # define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
292 | __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | ||
293 | |||
294 | /* | ||
295 | * Generic percpu operations for contexts that are safe from preemption/interrupts. | ||
296 | */ | ||
297 | # define __this_cpu_read(pcp) \ | ||
298 | (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp))) | ||
299 | |||
300 | # define __this_cpu_write(pcp, val) \ | ||
301 | do { __this_cpu_preempt_check("write"); \ | ||
302 | __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \ | ||
303 | } while (0) | ||
304 | |||
305 | # define __this_cpu_add(pcp, val) \ | ||
306 | do { __this_cpu_preempt_check("add"); \ | ||
307 | __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \ | ||
308 | } while (0) | ||
309 | |||
310 | # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) | ||
311 | # define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) | ||
312 | # define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) | ||
313 | |||
314 | # define __this_cpu_and(pcp, val) \ | ||
315 | do { __this_cpu_preempt_check("and"); \ | ||
316 | __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \ | ||
317 | } while (0) | ||
318 | |||
319 | # define __this_cpu_or(pcp, val) \ | ||
320 | do { __this_cpu_preempt_check("or"); \ | ||
321 | __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \ | ||
322 | } while (0) | ||
323 | |||
324 | # define __this_cpu_add_return(pcp, val) \ | ||
325 | (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)) | ||
326 | |||
327 | #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
328 | #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) | ||
329 | #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) | ||
330 | |||
331 | # define __this_cpu_xchg(pcp, nval) \ | ||
332 | (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)) | ||
333 | |||
334 | # define __this_cpu_cmpxchg(pcp, oval, nval) \ | ||
335 | (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)) | ||
336 | |||
337 | # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
338 | (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))) | ||
339 | |||
340 | #endif /* __LINUX_PERCPU_H */ | 132 | #endif /* __LINUX_PERCPU_H */ |