path: root/include/linux/percpu.h
author     Tejun Heo <tj@kernel.org>    2014-06-17 19:12:39 -0400
committer  Tejun Heo <tj@kernel.org>    2014-06-17 19:12:39 -0400
commit     a32f8d8eda8bd49017ac5f88e2b859f1f582557f (patch)
tree       8ef931b5060b802d41c8cafe8356b5d155a5f8d8 /include/linux/percpu.h
parent     47b69ad673d9aa53c1d6032a6a522fc0ce8d6fc1 (diff)
percpu: move {raw|this}_cpu_*() definitions to include/linux/percpu-defs.h
We're in the process of moving all percpu accessors and operations to
include/linux/percpu-defs.h so that they're available to arch headers
without having to include the full include/linux/percpu.h, which may
cause a cyclic inclusion dependency.

This patch moves the {raw|this}_cpu_*() definitions from
include/linux/percpu.h to include/linux/percpu-defs.h. The code is
moved mostly verbatim; however, raw_cpu_*() are placed above
this_cpu_*(), which is more conventional as the raw operations may be
used to define other variants.

This is pure reorganization.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
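[Editor's sketch] To illustrate the ordering rationale ("raw operations may be used to define other variants"), here is a minimal user-space sketch of that layering; the stubbed preempt_disable()/preempt_enable() and the single counter are illustrative stand-ins, not the kernel's actual generic implementation.

/*
 * Minimal user-space sketch (not kernel code): the "raw" operation is the
 * unprotected primitive, and a preemption-safe variant is layered on top
 * of it.  preempt_disable()/preempt_enable() are stubbed out here.
 */
#include <stdio.h>

static int pcp_counter;			/* stand-in for a per-cpu variable */

static void preempt_disable(void) { }	/* stub: the real kernel disables preemption here */
static void preempt_enable(void) { }

/* "raw" variant: no protection, caller must already be non-preemptible */
#define raw_cpu_add(pcp, val)	((pcp) += (val))

/* protected variant built on top of the raw one */
#define this_cpu_add(pcp, val)		\
do {					\
	preempt_disable();		\
	raw_cpu_add(pcp, val);		\
	preempt_enable();		\
} while (0)

int main(void)
{
	this_cpu_add(pcp_counter, 3);
	printf("pcp_counter = %d\n", pcp_counter);	/* prints "pcp_counter = 3" */
	return 0;
}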
Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--  include/linux/percpu.h  208
1 file changed, 0 insertions(+), 208 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 20b953532596..6f61b61b7996 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -129,212 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 #define alloc_percpu(type)	\
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
-/*
- * Branching function to split up a function into a set of functions that
- * are called for different scalar sizes of the objects handled.
- */
-
-extern void __bad_size_call_parameter(void);
-
-#ifdef CONFIG_DEBUG_PREEMPT
-extern void __this_cpu_preempt_check(const char *op);
-#else
-static inline void __this_cpu_preempt_check(const char *op) { }
-#endif
-
-#define __pcpu_size_call_return(stem, variable)				\
-({	typeof(variable) pscr_ret__;					\
-	__verify_pcpu_ptr(&(variable));					\
-	switch(sizeof(variable)) {					\
-	case 1: pscr_ret__ = stem##1(variable);break;			\
-	case 2: pscr_ret__ = stem##2(variable);break;			\
-	case 4: pscr_ret__ = stem##4(variable);break;			\
-	case 8: pscr_ret__ = stem##8(variable);break;			\
-	default:							\
-		__bad_size_call_parameter();break;			\
-	}								\
-	pscr_ret__;							\
-})
-
-#define __pcpu_size_call_return2(stem, variable, ...)			\
-({									\
-	typeof(variable) pscr2_ret__;					\
-	__verify_pcpu_ptr(&(variable));					\
-	switch(sizeof(variable)) {					\
-	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
-	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
-	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
-	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
-	default:							\
-		__bad_size_call_parameter(); break;			\
-	}								\
-	pscr2_ret__;							\
-})
-
-/*
- * Special handling for cmpxchg_double. cmpxchg_double is passed two
- * percpu variables. The first has to be aligned to a double word
- * boundary and the second has to follow directly thereafter.
- * We enforce this on all architectures even if they don't support
- * a double cmpxchg instruction, since it's a cheap requirement, and it
- * avoids breaking the requirement for architectures with the instruction.
- */
-#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
-({									\
-	bool pdcrb_ret__;						\
-	__verify_pcpu_ptr(&pcp1);					\
-	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
-	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
-	VM_BUG_ON((unsigned long)(&pcp2) !=				\
-		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
-	switch(sizeof(pcp1)) {						\
-	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
-	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
-	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
-	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
-	default:							\
-		__bad_size_call_parameter(); break;			\
-	}								\
-	pdcrb_ret__;							\
-})
-
-#define __pcpu_size_call(stem, variable, ...)				\
-do {									\
-	__verify_pcpu_ptr(&(variable));					\
-	switch(sizeof(variable)) {					\
-		case 1: stem##1(variable, __VA_ARGS__);break;		\
-		case 2: stem##2(variable, __VA_ARGS__);break;		\
-		case 4: stem##4(variable, __VA_ARGS__);break;		\
-		case 8: stem##8(variable, __VA_ARGS__);break;		\
-		default:						\
-			__bad_size_call_parameter();break;		\
-	}								\
-} while (0)
-
-/*
- * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
- *
- * Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables.
- *
- * These operation guarantee exclusivity of access for other operations
- * on the *same* processor. The assumption is that per cpu data is only
- * accessed by a single processor instance (the current one).
- *
- * The first group is used for accesses that must be done in a
- * preemption safe way since we know that the context is not preempt
- * safe. Interrupts may occur. If the interrupt modifies the variable
- * too then RMW actions will not be reliable.
- *
- * The arch code can provide optimized implementation by defining macros
- * for certain scalar sizes. F.e. provide this_cpu_add_2() to provide per
- * cpu atomic operations for 2 byte sized RMW actions. If arch code does
- * not provide operations for a scalar size then the fallback in the
- * generic code will be used.
- */
-
-# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
-# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
-# define this_cpu_add(pcp, val)	__pcpu_size_call(this_cpu_add_, (pcp), (val))
-# define this_cpu_sub(pcp, val)	this_cpu_add((pcp), -(typeof(pcp))(val))
-# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
-# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
-# define this_cpu_and(pcp, val)	__pcpu_size_call(this_cpu_and_, (pcp), (val))
-# define this_cpu_or(pcp, val)	__pcpu_size_call(this_cpu_or_, (pcp), (val))
-# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
-#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
-#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
-# define this_cpu_xchg(pcp, nval)	\
-	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
-# define this_cpu_cmpxchg(pcp, oval, nval)	\
-	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
-
-/*
- * cmpxchg_double replaces two adjacent scalars at once. The first
- * two parameters are per cpu variables which have to be of the same
- * size. A truth value is returned to indicate success or failure
- * (since a double register result is difficult to handle). There is
- * very limited hardware support for these operations, so only certain
- * sizes may work.
- */
-# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-
-/*
- * Generic percpu operations for contexts where we do not want to do
- * any checks for preemptiosn.
- *
- * If there is no other protection through preempt disable and/or
- * disabling interupts then one of these RMW operations can show unexpected
- * behavior because the execution thread was rescheduled on another processor
- * or an interrupt occurred and the same percpu variable was modified from
- * the interrupt context.
- */
-# define raw_cpu_read(pcp)	__pcpu_size_call_return(raw_cpu_read_, (pcp))
-# define raw_cpu_write(pcp, val)	__pcpu_size_call(raw_cpu_write_, (pcp), (val))
-# define raw_cpu_add(pcp, val)	__pcpu_size_call(raw_cpu_add_, (pcp), (val))
-# define raw_cpu_sub(pcp, val)	raw_cpu_add((pcp), -(val))
-# define raw_cpu_inc(pcp)	raw_cpu_add((pcp), 1)
-# define raw_cpu_dec(pcp)	raw_cpu_sub((pcp), 1)
-# define raw_cpu_and(pcp, val)	__pcpu_size_call(raw_cpu_and_, (pcp), (val))
-# define raw_cpu_or(pcp, val)	__pcpu_size_call(raw_cpu_or_, (pcp), (val))
-# define raw_cpu_add_return(pcp, val)	\
-	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
-#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define raw_cpu_inc_return(pcp)	raw_cpu_add_return(pcp, 1)
-#define raw_cpu_dec_return(pcp)	raw_cpu_add_return(pcp, -1)
-# define raw_cpu_xchg(pcp, nval)	\
-	__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
-# define raw_cpu_cmpxchg(pcp, oval, nval)	\
-	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
-# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-
-/*
- * Generic percpu operations for context that are safe from preemption/interrupts.
- */
-# define __this_cpu_read(pcp)						\
-	(__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
-
-# define __this_cpu_write(pcp, val)					\
-do { __this_cpu_preempt_check("write");					\
-     __pcpu_size_call(raw_cpu_write_, (pcp), (val));			\
-} while (0)
-
-# define __this_cpu_add(pcp, val)					\
-do { __this_cpu_preempt_check("add");					\
-	__pcpu_size_call(raw_cpu_add_, (pcp), (val));			\
-} while (0)
-
-# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
-# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
-# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
-
-# define __this_cpu_and(pcp, val)					\
-do { __this_cpu_preempt_check("and");					\
-	__pcpu_size_call(raw_cpu_and_, (pcp), (val));			\
-} while (0)
-
-# define __this_cpu_or(pcp, val)					\
-do { __this_cpu_preempt_check("or");					\
-	__pcpu_size_call(raw_cpu_or_, (pcp), (val));			\
-} while (0)
-
-# define __this_cpu_add_return(pcp, val)				\
-	(__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
-
-#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
-#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
-
-# define __this_cpu_xchg(pcp, nval)					\
-	(__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
-
-# define __this_cpu_cmpxchg(pcp, oval, nval)				\
-	(__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
-
-# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	(__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
-
 #endif /* __LINUX_PERCPU_H */
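[Editor's sketch] The removed __pcpu_size_call*() macros dispatch on sizeof() and paste a size suffix onto the operation stem. Below is a small user-space sketch of that technique; demo_read, demo_read_2 and demo_read_4 are made-up names, not the kernel macros, and the code relies on GNU C statement expressions and typeof, as the kernel does.

/*
 * User-space sketch of the sizeof() dispatch technique: a generic macro
 * selects a per-size implementation by pasting the size onto a stem.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t demo_read_2(uint16_t *p) { return *p; }	/* 2-byte variant */
static uint32_t demo_read_4(uint32_t *p) { return *p; }	/* 4-byte variant */

#define demo_read(var)						\
({								\
	typeof(var) dr_ret__;					\
	switch (sizeof(var)) {					\
	case 2: dr_ret__ = demo_read_2((void *)&(var)); break;	\
	case 4: dr_ret__ = demo_read_4((void *)&(var)); break;	\
	default: dr_ret__ = 0; break;				\
	}							\
	dr_ret__;						\
})

int main(void)
{
	uint16_t a = 7;
	uint32_t b = 9;

	printf("%u %u\n", demo_read(a), demo_read(b));	/* prints "7 9" */
	return 0;
}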
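[Editor's sketch] For orientation, a kernel-style sketch of how the relocated accessors are typically used. DEFINE_PER_CPU(), this_cpu_inc() and raw_cpu_inc() are the real interfaces; the function and variable names are hypothetical, and the snippet is not part of this patch.

/* Illustrative only -- not part of this patch. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);	/* hypothetical per-cpu counter */

void demo_record_hit(void)
{
	/* preemption-safe read-modify-write on this CPU's copy */
	this_cpu_inc(demo_hits);
}

void demo_record_hit_nopreempt(void)
{
	/*
	 * raw_cpu_inc() performs no preemption protection or checking;
	 * it is only safe when the caller already runs with preemption
	 * (or interrupts, depending on the data) disabled.
	 */
	raw_cpu_inc(demo_hits);
}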