aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--  include/linux/percpu.h  47
1 files changed, 22 insertions, 25 deletions
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 1fdaee93c04d..d99e24ae1811 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -82,46 +82,43 @@ struct percpu_data {
82 82
83#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) 83#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
84 84
85extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); 85/*
86extern void percpu_free(void *__pdata); 86 * Use this to get to a cpu's version of the per-cpu object
87 * dynamically allocated. Non-atomic access to the current CPU's
88 * version should probably be combined with get_cpu()/put_cpu().
89 */
90#define per_cpu_ptr(ptr, cpu) \
91({ \
92 struct percpu_data *__p = __percpu_disguise(ptr); \
93 (__typeof__(ptr))__p->ptrs[(cpu)]; \
94})
95
96extern void *__alloc_percpu(size_t size, size_t align);
97extern void free_percpu(void *__pdata);
87 98
88#else /* CONFIG_SMP */ 99#else /* CONFIG_SMP */
89 100
90#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) 101#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
91 102
92static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) 103static inline void *__alloc_percpu(size_t size, size_t align)
93{ 104{
105 /*
106 * Can't easily make larger alignment work with kmalloc. WARN
107 * on it. Larger alignment should only be used for module
108 * percpu sections on SMP for which this path isn't used.
109 */
110 WARN_ON_ONCE(align > __alignof__(unsigned long long));
 94 return kzalloc(size, gfp); 111 return kzalloc(size, GFP_KERNEL);
95} 112}
96 113
97static inline void percpu_free(void *__pdata) 114static inline void free_percpu(void *p)
98{ 115{
99 kfree(__pdata); 116 kfree(p);
100} 117}
101 118
102#endif /* CONFIG_SMP */ 119#endif /* CONFIG_SMP */
103 120
104#define percpu_alloc_mask(size, gfp, mask) \
105 __percpu_alloc_mask((size), (gfp), &(mask))
106
107#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
108
109/* (legacy) interface for use without CPU hotplug handling */
110
111#define __alloc_percpu(size, align) percpu_alloc_mask((size), GFP_KERNEL, \
112 cpu_possible_map)
113#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ 121#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
114 __alignof__(type)) 122 __alignof__(type))
115#define free_percpu(ptr) percpu_free((ptr))
116/*
117 * Use this to get to a cpu's version of the per-cpu object dynamically
118 * allocated. Non-atomic access to the current CPU's version should
119 * probably be combined with get_cpu()/put_cpu().
120 */
121#define per_cpu_ptr(ptr, cpu) \
122({ \
123 struct percpu_data *__p = __percpu_disguise(ptr); \
124 (__typeof__(ptr))__p->ptrs[(cpu)]; \
125})
126 123
127#endif /* __LINUX_PERCPU_H */ 124#endif /* __LINUX_PERCPU_H */