path: root/include/linux/percpu.h
author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/linux/percpu.h
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--  include/linux/percpu.h  374
1 file changed, 349 insertions(+), 25 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 49466b13c5c6..9ca008f0c542 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -39,10 +39,17 @@
         preempt_enable(); \
 } while (0)
 
-#ifdef CONFIG_SMP
+#define get_cpu_ptr(var) ({ \
+        preempt_disable(); \
+        this_cpu_ptr(var); })
+
+#define put_cpu_ptr(var) do { \
+        (void)(var); \
+        preempt_enable(); \
+} while (0)
 
 /* minimum unit size, also is the maximum supported allocation size */
-#define PCPU_MIN_UNIT_SIZE      PFN_ALIGN(64 << 10)
+#define PCPU_MIN_UNIT_SIZE      PFN_ALIGN(32 << 10)
 
 /*
  * Percpu allocator can serve percpu allocations before slab is
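
The hunk above adds get_cpu_ptr()/put_cpu_ptr(), the pointer-based counterparts of get_cpu_var()/put_cpu_var(). The following usage sketch is not part of the patch; the names my_stats, my_counters and my_count_event are hypothetical.

#include <linux/percpu.h>

struct my_stats {
        unsigned long events;
};

/* assumed to be set up elsewhere with alloc_percpu(struct my_stats) */
static struct my_stats __percpu *my_counters;

static void my_count_event(void)
{
        struct my_stats *s;

        s = get_cpu_ptr(my_counters);   /* disables preemption, pins us to this CPU */
        s->events++;
        put_cpu_ptr(my_counters);       /* re-enables preemption */
}
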
@@ -137,37 +144,20 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
  * dynamically allocated. Non-atomic access to the current CPU's
  * version should probably be combined with get_cpu()/put_cpu().
  */
+#ifdef CONFIG_SMP
 #define per_cpu_ptr(ptr, cpu)   SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+#else
+#define per_cpu_ptr(ptr, cpu)   ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
+#endif
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 extern void __init setup_per_cpu_areas(void);
 #endif
 extern void __init percpu_init_late(void);
 
-#else /* CONFIG_SMP */
-
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
-
-/* can't distinguish from other static vars, always false */
-static inline bool is_kernel_percpu_address(unsigned long addr)
-{
-        return false;
-}
-
-static inline void __init setup_per_cpu_areas(void) { }
-
-static inline void __init percpu_init_late(void) { }
-
-static inline void *pcpu_lpage_remapped(void *kaddr)
-{
-        return NULL;
-}
-
-#endif /* CONFIG_SMP */
-
 extern void __percpu *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void __percpu *__pdata);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
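
With the UP stub of per_cpu_ptr() added above, callers no longer need an SMP/UP split of their own. A sketch (not part of the patch) that sums the hypothetical my_counters from the previous example on either configuration:

static unsigned long my_total_events(void)
{
        unsigned long sum = 0;
        int cpu;

        /* on UP kernels the possible-CPU mask contains only CPU 0 */
        for_each_possible_cpu(cpu)
                sum += per_cpu_ptr(my_counters, cpu)->events;
        return sum;
}
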
@@ -250,6 +240,48 @@ extern void __bad_size_call_parameter(void);
         pscr_ret__; \
 })
 
+#define __pcpu_size_call_return2(stem, variable, ...) \
+({ \
+        typeof(variable) pscr2_ret__; \
+        __verify_pcpu_ptr(&(variable)); \
+        switch(sizeof(variable)) { \
+        case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
+        case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
+        case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
+        case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
+        default: \
+                __bad_size_call_parameter(); break; \
+        } \
+        pscr2_ret__; \
+})
+
+/*
+ * Special handling for cmpxchg_double. cmpxchg_double is passed two
+ * percpu variables. The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
+({ \
+        bool pdcrb_ret__; \
+        __verify_pcpu_ptr(&pcp1); \
+        BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
+        VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \
+        VM_BUG_ON((unsigned long)(&pcp2) != \
+                  (unsigned long)(&pcp1) + sizeof(pcp1)); \
+        switch(sizeof(pcp1)) { \
+        case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
+        case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
+        case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
+        case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
+        default: \
+                __bad_size_call_parameter(); break; \
+        } \
+        pdcrb_ret__; \
+})
+
 #define __pcpu_size_call(stem, variable, ...) \
 do { \
         __verify_pcpu_ptr(&(variable)); \
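
The __pcpu_size_call_return2() helper added above dispatches on sizeof() and token-pastes the operand width onto a stem, so each width can be overridden by an arch-specific implementation. The following stand-alone GNU C illustration of that pattern is editorial, not kernel code, and every name in it is made up for the demo:

#include <stdio.h>

#define demo_op_4(var, val)     ((var) + (val)) /* stands in for a 32-bit path */
#define demo_op_8(var, val)     ((var) + (val)) /* stands in for a 64-bit path */

#define demo_size_call_return2(stem, variable, ...) \
({ \
        typeof(variable) ret__; \
        switch (sizeof(variable)) { \
        case 4: ret__ = stem##4(variable, __VA_ARGS__); break; \
        case 8: ret__ = stem##8(variable, __VA_ARGS__); break; \
        default: ret__ = 0; break; \
        } \
        ret__; \
})

int main(void)
{
        int a = 1;
        long long b = 2;

        /* the switch runs the case matching sizeof(): 4 for a, 8 for b */
        printf("%d %lld\n",
               demo_size_call_return2(demo_op_, a, 10),
               demo_size_call_return2(demo_op_, b, 10));
        return 0;
}
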
@@ -412,6 +444,128 @@ do { \
 # define this_cpu_xor(pcp, val)        __pcpu_size_call(this_cpu_or_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val) \
+({ \
+        typeof(pcp) ret__; \
+        preempt_disable(); \
+        __this_cpu_add(pcp, val); \
+        ret__ = __this_cpu_read(pcp); \
+        preempt_enable(); \
+        ret__; \
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+#  define this_cpu_add_return_1(pcp, val)       _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+#  define this_cpu_add_return_2(pcp, val)       _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+#  define this_cpu_add_return_4(pcp, val)       _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+#  define this_cpu_add_return_8(pcp, val)       _this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val)  __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)   this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)        this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)        this_cpu_add_return(pcp, -1)
+
+#define _this_cpu_generic_xchg(pcp, nval) \
+({      typeof(pcp) ret__; \
+        preempt_disable(); \
+        ret__ = __this_cpu_read(pcp); \
+        __this_cpu_write(pcp, nval); \
+        preempt_enable(); \
+        ret__; \
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+#  define this_cpu_xchg_1(pcp, nval)    _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+#  define this_cpu_xchg_2(pcp, nval)    _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+#  define this_cpu_xchg_4(pcp, nval)    _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+#  define this_cpu_xchg_8(pcp, nval)    _this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval) \
+        __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({      typeof(pcp) ret__; \
+        preempt_disable(); \
+        ret__ = __this_cpu_read(pcp); \
+        if (ret__ == (oval)) \
+                __this_cpu_write(pcp, nval); \
+        preempt_enable(); \
+        ret__; \
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+#  define this_cpu_cmpxchg_1(pcp, oval, nval)   _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+#  define this_cpu_cmpxchg_2(pcp, oval, nval)   _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+#  define this_cpu_cmpxchg_4(pcp, oval, nval)   _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+#  define this_cpu_cmpxchg_8(pcp, oval, nval)   _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval) \
+        __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
+/*
+ * cmpxchg_double replaces two adjacent scalars at once. The first
+ * two parameters are per cpu variables which have to be of the same
+ * size. A truth value is returned to indicate success or failure
+ * (since a double register result is difficult to handle). There is
+ * very limited hardware support for these operations, so only certain
+ * sizes may work.
+ */
+#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+        int ret__; \
+        preempt_disable(); \
+        ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+                        oval1, oval2, nval1, nval2); \
+        preempt_enable(); \
+        ret__; \
+})
+
+#ifndef this_cpu_cmpxchg_double
+# ifndef this_cpu_cmpxchg_double_1
+#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_2
+#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_4
+#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_8
+#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
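
The hunk above gives the this_cpu_* family returning, exchanging and compare-and-exchange forms, built from preempt_disable()/preempt_enable() wherever the architecture does not provide a better primitive. A usage sketch, not part of the patch, with hypothetical my_seq and my_pending variables:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, my_seq);
static DEFINE_PER_CPU(int, my_pending);

static unsigned int my_next_seq(void)
{
        /* one call replaces preempt_disable(); add; read; preempt_enable() */
        return this_cpu_inc_return(my_seq);
}

static bool my_mark_pending(void)
{
        /* returns the old value, so 0 means we made the 0 -> 1 transition */
        return this_cpu_cmpxchg(my_pending, 0, 1) == 0;
}
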
@@ -539,11 +693,120 @@ do { \
 # define __this_cpu_xor(pcp, val)      __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
+#define __this_cpu_generic_add_return(pcp, val) \
+({ \
+        __this_cpu_add(pcp, val); \
+        __this_cpu_read(pcp); \
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+#  define __this_cpu_add_return_1(pcp, val)     __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+#  define __this_cpu_add_return_2(pcp, val)     __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+#  define __this_cpu_add_return_4(pcp, val)     __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+#  define __this_cpu_add_return_8(pcp, val)     __this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val)        __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)      this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)      this_cpu_add_return(pcp, -1)
+
+#define __this_cpu_generic_xchg(pcp, nval) \
+({      typeof(pcp) ret__; \
+        ret__ = __this_cpu_read(pcp); \
+        __this_cpu_write(pcp, nval); \
+        ret__; \
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+#  define __this_cpu_xchg_1(pcp, nval)  __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+#  define __this_cpu_xchg_2(pcp, nval)  __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+#  define __this_cpu_xchg_4(pcp, nval)  __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+#  define __this_cpu_xchg_8(pcp, nval)  __this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval) \
+        __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+        typeof(pcp) ret__; \
+        ret__ = __this_cpu_read(pcp); \
+        if (ret__ == (oval)) \
+                __this_cpu_write(pcp, nval); \
+        ret__; \
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+#  define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+#  define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+#  define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+#  define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval) \
+        __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+        int __ret = 0; \
+        if (__this_cpu_read(pcp1) == (oval1) && \
+                        __this_cpu_read(pcp2) == (oval2)) { \
+                __this_cpu_write(pcp1, (nval1)); \
+                __this_cpu_write(pcp2, (nval2)); \
+                __ret = 1; \
+        } \
+        (__ret); \
+})
+
+#ifndef __this_cpu_cmpxchg_double
+# ifndef __this_cpu_cmpxchg_double_1
+#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_2
+#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_4
+#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_8
+#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
  * processors (which one gets when using regular atomic operations)
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
  * preemption only.
  */
 #define irqsafe_cpu_generic_to_op(pcp, val, op) \
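
The __this_cpu_* forms added above omit preemption handling entirely, so they are meant for code that is already pinned to one CPU (IRQ handlers, preempt- or IRQ-disabled sections). A sketch, not part of the patch, with a hypothetical my_pending_bytes counter:

static DEFINE_PER_CPU(unsigned long, my_pending_bytes);

/* Caller runs with IRQs disabled on this CPU, e.g. from an interrupt handler. */
static unsigned long my_drain_pending(void)
{
        /* read-and-clear without adding any preemption or IRQ fencing of its own */
        return __this_cpu_xchg(my_pending_bytes, 0);
}
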
@@ -630,4 +893,65 @@ do { \
 # define irqsafe_cpu_xor(pcp, val)     __pcpu_size_call(irqsafe_cpu_xor_, (val))
 #endif
 
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+        typeof(pcp) ret__; \
+        unsigned long flags; \
+        local_irq_save(flags); \
+        ret__ = __this_cpu_read(pcp); \
+        if (ret__ == (oval)) \
+                __this_cpu_write(pcp, nval); \
+        local_irq_restore(flags); \
+        ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)        irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)        irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)        irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)        irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval) \
+        __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
+#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+        int ret__; \
+        unsigned long flags; \
+        local_irq_save(flags); \
+        ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+                        oval1, oval2, nval1, nval2); \
+        local_irq_restore(flags); \
+        ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg_double
+# ifndef irqsafe_cpu_cmpxchg_double_1
+#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_2
+#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_4
+#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_8
+#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+        __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 #endif /* __LINUX_PERCPU_H */
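
The irqsafe_cpu_cmpxchg()/irqsafe_cpu_cmpxchg_double() fallbacks added in the last hunk wrap the generic compare-and-exchange in local_irq_save()/local_irq_restore(), so the read-modify-write cannot be torn by a local interrupt. A closing usage sketch, not part of the patch, with a hypothetical my_slot_busy flag that is also written from interrupt context:

static DEFINE_PER_CPU(int, my_slot_busy);

static bool my_claim_slot(void)
{
        /* safe even if an IRQ handler on this CPU races for the same slot */
        return irqsafe_cpu_cmpxchg(my_slot_busy, 0, 1) == 0;
}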