Diffstat (limited to 'include/linux/percpu.h')

 include/linux/percpu.h | 44 +++++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 17 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cf5efbcf716..a93e5bfdccb 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -27,10 +27,17 @@
  * we force a syntax error here if it isn't.
  */
 #define get_cpu_var(var) (*({				\
-	extern int simple_identifier_##var(void);	\
 	preempt_disable();				\
 	&__get_cpu_var(var); }))
-#define put_cpu_var(var) preempt_enable()
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var) do {				\
+	(void)&(var);					\
+	preempt_enable();				\
+} while (0)
 
 #ifdef CONFIG_SMP
 
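In practice the pair brackets a short critical section on the local CPU. A minimal usage sketch (the counter name is hypothetical), showing why the new put_cpu_var() body satisfies sparse: taking the address of a __percpu variable is permitted, while evaluating its value counts as a dereference:

#include <linux/percpu.h>

DEFINE_PER_CPU(int, my_count);		/* hypothetical per-CPU counter */

static void bump_local_count(void)
{
	/* get_cpu_var() disables preemption, then dereferences this
	 * CPU's copy via __get_cpu_var() */
	get_cpu_var(my_count)++;
	/* put_cpu_var() "uses" its argument only as (void)&(my_count),
	 * which is not a dereference, then re-enables preemption */
	put_cpu_var(my_count);
}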
@@ -127,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
  */
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
-extern void *__alloc_reserved_percpu(size_t size, size_t align);
-extern void *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void *__pdata);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
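The __percpu marker added to these prototypes is meaningful only to sparse. For ordinary builds it expands to nothing; under __CHECKER__ it is, roughly, the following noderef address-space attribute (a sketch of the compiler.h definition from the same annotation series):

#ifdef __CHECKER__
/* the pointer lives in a separate address space and must not be
 * dereferenced directly; accessors like per_cpu_ptr() cast it back */
# define __percpu	__attribute__((noderef, address_space(3)))
#else
# define __percpu
#endif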
@@ -140,7 +147,7 @@ extern void __init setup_per_cpu_areas(void);
 
 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static inline void *__alloc_percpu(size_t size, size_t align)
+static inline void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	/*
 	 * Can't easily make larger alignment work with kmalloc.  WARN
@@ -151,7 +158,7 @@ static inline void *__alloc_percpu(size_t size, size_t align)
 	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void free_percpu(void *p)
+static inline void free_percpu(void __percpu *p)
 {
 	kfree(p);
 }
@@ -171,7 +178,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 #endif /* CONFIG_SMP */
 
 #define alloc_percpu(type)	\
-	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
+	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
 /*
  * Optional methods for optimized non-lvalue per-cpu variable access.
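With the __percpu qualifier now carried through the alloc_percpu() cast, storing the result in an unannotated pointer draws a sparse warning. A minimal caller sketch (type and variable names hypothetical):

#include <linux/percpu.h>

struct hit_stats {
	unsigned long hits;
};

static struct hit_stats __percpu *stats;	/* annotation must match */

static int stats_init(void)
{
	stats = alloc_percpu(struct hit_stats);	/* typed __percpu result */
	return stats ? 0 : -ENOMEM;
}

static unsigned long stats_total(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu_ptr() yields an ordinary pointer to one CPU's copy,
	 * which may be dereferenced normally */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(stats, cpu)->hits;
	return sum;
}

static void stats_exit(void)
{
	free_percpu(stats);	/* takes a __percpu pointer after this patch */
}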
@@ -188,17 +195,19 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 #ifndef percpu_read
 # define percpu_read(var)					\
   ({								\
-	typeof(per_cpu_var(var)) __tmp_var__;			\
-	__tmp_var__ = get_cpu_var(var);				\
-	put_cpu_var(var);					\
-	__tmp_var__;						\
+	typeof(var) *pr_ptr__ = &(var);				\
+	typeof(var) pr_ret__;					\
+	pr_ret__ = get_cpu_var(*pr_ptr__);			\
+	put_cpu_var(*pr_ptr__);					\
+	pr_ret__;						\
   })
 #endif
 
 #define __percpu_generic_to_op(var, val, op)			\
 do {								\
-	get_cpu_var(var) op val;				\
-	put_cpu_var(var);					\
+	typeof(var) *pgto_ptr__ = &(var);			\
+	get_cpu_var(*pgto_ptr__) op val;			\
+	put_cpu_var(*pgto_ptr__);				\
 } while (0)
 
 #ifndef percpu_write
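The new pointer indirection takes the address of the macro argument exactly once, so an argument with side effects is not re-evaluated, and it hands sparse a typed pointer to check rather than a bare dereference. For a hypothetical DEFINE_PER_CPU(int, hits), the rewritten generic percpu_read(hits) behaves roughly like this expansion sketch (get_cpu_var/put_cpu_var left unexpanded):

({
	int *pr_ptr__ = &hits;			/* address taken once; not a
						 * dereference to sparse */
	int pr_ret__;
	pr_ret__ = get_cpu_var(*pr_ptr__);	/* preempt off, read this
						 * CPU's copy */
	put_cpu_var(*pr_ptr__);			/* preempt back on */
	pr_ret__;				/* value of the expression */
})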
@@ -234,6 +243,7 @@ extern void __bad_size_call_parameter(void);
 
 #define __pcpu_size_call_return(stem, variable)			\
 ({	typeof(variable) pscr_ret__;				\
+	__verify_pcpu_ptr(&(variable));				\
 	switch(sizeof(variable)) {				\
 	case 1: pscr_ret__ = stem##1(variable);break;		\
 	case 2: pscr_ret__ = stem##2(variable);break;		\
@@ -247,6 +257,7 @@ extern void __bad_size_call_parameter(void);
 
 #define __pcpu_size_call(stem, variable, ...)			\
 do {								\
+	__verify_pcpu_ptr(&(variable));				\
 	switch(sizeof(variable)) {				\
 	case 1: stem##1(variable, __VA_ARGS__);break;		\
 	case 2: stem##2(variable, __VA_ARGS__);break;		\
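__verify_pcpu_ptr() is purely a compile-time check that the argument really is a percpu pointer; it emits no object code. Its percpu-defs.h definition is roughly the following (a sketch from the same annotation series):

/*
 * Assigning to a const void __percpu * makes sparse complain unless
 * ptr is in the percpu address space; the (void) use silences the
 * unused-variable warning.
 */
#define __verify_pcpu_ptr(ptr)	do {				\
	const void __percpu *__vpp_verify = (typeof(ptr))(ptr);	\
	(void)__vpp_verify;					\
} while (0)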
@@ -259,8 +270,7 @@ do {								\
 
 /*
  * Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables (can be determined
- * using per_cpu_var(xx).
+ * allocator or for addresses of per cpu variables.
  *
  * These operation guarantee exclusivity of access for other operations
  * on the *same* processor. The assumption is that per cpu data is only
@@ -311,7 +321,7 @@ do {								\
 #define _this_cpu_generic_to_op(pcp, val, op)			\
 do {								\
 	preempt_disable();					\
-	*__this_cpu_ptr(&pcp) op val;				\
+	*__this_cpu_ptr(&(pcp)) op val;				\
 	preempt_enable();					\
 } while (0)
 