Diffstat (limited to 'include')
 -rw-r--r--  include/asm-generic/percpu.h |   5
 -rw-r--r--  include/linux/percpu.h       | 400
 2 files changed, 405 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 90079c373f1..8087b90d467 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define __raw_get_cpu_var(var) \
 	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
 
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
 #define __get_cpu_var(var) per_cpu_var(var)
 #define __raw_get_cpu_var(var) per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
 #endif /* SMP */
 
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5baf5b8788f..3d9ba92b104 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -219,4 +219,404 @@ do { \
 # define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
 #endif
 
+/*
+ * Branch on the scalar size of the object handled and call the
+ * matching size-specific variant of the operation.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __size_call_return(stem, variable)				\
+({	typeof(variable) ret__;						\
+	switch (sizeof(variable)) {					\
+	case 1: ret__ = stem##1(variable); break;			\
+	case 2: ret__ = stem##2(variable); break;			\
+	case 4: ret__ = stem##4(variable); break;			\
+	case 8: ret__ = stem##8(variable); break;			\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	ret__;								\
+})
+
+#define __size_call(stem, variable, ...)				\
+do {									\
+	switch (sizeof(variable)) {					\
+	case 1: stem##1(variable, __VA_ARGS__); break;			\
+	case 2: stem##2(variable, __VA_ARGS__); break;			\
+	case 4: stem##4(variable, __VA_ARGS__); break;			\
+	case 8: stem##8(variable, __VA_ARGS__); break;			\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+} while (0)
+
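+/*
+ * For example (illustrative only; "hits" is a hypothetical per cpu
+ * counter, not defined in this file):
+ *
+ *	DEFINE_PER_CPU(int, hits);
+ *
+ *	this_cpu_read(per_cpu_var(hits))
+ *
+ * dispatches on sizeof(int) == 4 and becomes
+ *
+ *	this_cpu_read_4(per_cpu_var(hits))
+ *
+ * which falls back to _this_cpu_generic_read() unless the arch
+ * provides its own this_cpu_read_4().
+ */
+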
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (which can be
+ * determined using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access against other
+ * operations on the *same* processor. The assumption is that per cpu
+ * data is only accessed by a single processor instance (the current
+ * one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since the calling context is not known to be
+ * preempt safe. Interrupts may occur. If the interrupt modifies the
+ * variable too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely, e.g. define this_cpu_add().
+ *    The arch must then ensure that the various scalar formats passed
+ *    are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes, e.g. provide
+ *    this_cpu_add_2() to supply per cpu atomic operations for 2 byte
+ *    sized RMW actions. If the arch code does not provide operations
+ *    for a scalar size then the fallback in the generic code will be
+ *    used.
+ */
+
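+/*
+ * Typical use of the preemption safe group (a sketch; "hits" is the
+ * hypothetical counter from the example above):
+ *
+ *	DEFINE_PER_CPU(int, hits);
+ *
+ *	static void count_hit(void)
+ *	{
+ *		this_cpu_inc(per_cpu_var(hits));
+ *	}
+ *
+ * No get_cpu()/put_cpu() or preempt_disable() is needed around the
+ * increment: the generic fallback disables preemption itself and the
+ * operation always acts on the current processor's instance.
+ */
+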
+#define _this_cpu_generic_read(pcp)					\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = *this_cpu_ptr(&(pcp));					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp)	__size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	preempt_disable();						\
+	*__this_cpu_ptr(&pcp) op val;					\
+	preempt_enable();						\
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val)	__size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val)	__size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val)	this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp)	this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp)	this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val)	__size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val)	__size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val)	__size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show
+ * unexpected behavior because the execution thread was rescheduled on
+ * another processor or an interrupt occurred and the same percpu
+ * variable was modified from the interrupt context.
+ */
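+/*
+ * Sketch of correct use (again with the hypothetical "hits" counter):
+ * the caller provides the protection, so the cheaper __this_cpu_*
+ * form can be used inside the critical section.
+ *
+ *	preempt_disable();
+ *	__this_cpu_inc(per_cpu_var(hits));
+ *	...
+ *	preempt_enable();
+ */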
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp)	__size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	*__this_cpu_ptr(&(pcp)) op val;					\
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val)	__size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val)	__size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp)	__this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp)	__this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val)	__size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val)	__size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val)	__size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these
+ * operations are *not* safe against modification of the same variable
+ * from another processor (which is what one gets when using regular
+ * atomic operations). They are guaranteed to be atomic vs. local
+ * interrupts and preemption only.
+ */
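+/*
+ * Sketch: a statistic (the hypothetical "hits" counter again) that is
+ * also updated from interrupt context on the same processor can use
+ * the irqsafe variant from process context without further locking:
+ *
+ *	irqsafe_cpu_inc(per_cpu_var(hits));
+ *
+ * The generic fallback brackets the RMW with local_irq_save() and
+ * local_irq_restore(), so an interrupt on this processor cannot split
+ * the read-modify-write.
+ */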
+#define irqsafe_cpu_generic_to_op(pcp, val, op)			\
+do {									\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	*__this_cpu_ptr(&(pcp)) op val;					\
+	local_irq_restore(flags);					\
+} while (0)
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+#  define irqsafe_cpu_add_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+#  define irqsafe_cpu_add_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+#  define irqsafe_cpu_add_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+#  define irqsafe_cpu_add_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val)	__size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+#  define irqsafe_cpu_and_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+#  define irqsafe_cpu_and_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+#  define irqsafe_cpu_and_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+#  define irqsafe_cpu_and_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val)	__size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+#  define irqsafe_cpu_or_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+#  define irqsafe_cpu_or_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+#  define irqsafe_cpu_or_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+#  define irqsafe_cpu_or_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val)	__size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+#  define irqsafe_cpu_xor_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+#  define irqsafe_cpu_xor_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+#  define irqsafe_cpu_xor_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+#  define irqsafe_cpu_xor_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val)	__size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
 #endif /* __LINUX_PERCPU_H */