commit:    e0fdb0e050eae331046385643618f12452aa7e73 (patch)
author:    Rusty Russell <rusty@rustcorp.com.au>  2009-10-29 09:34:15 -0400
committer: Tejun Heo <tj@kernel.org>              2009-10-29 09:34:15 -0400
tree:      6156f577879764fd75cf8c46ca7980433de0e59d /include/linux/percpu.h
parent:    f7b64fe806029e0a0454df132eec3c5ab576102c (diff)
percpu: add __percpu for sparse.
We have to make __kernel "__attribute__((address_space(0)))" so we can
cast to it.
tj: * put_cpu_var() update.
* Annotations added to dynamic allocator interface.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--  include/linux/percpu.h | 18 ++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index f965f833a643..2c0d31a3f6b6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -30,8 +30,12 @@
 	preempt_disable();				\
 	&__get_cpu_var(var); }))
 
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
 #define put_cpu_var(var) do {				\
-	(void)(var);					\
+	(void)&(var);					\
 	preempt_enable();				\
 } while (0)
37 | 41 | ||
@@ -130,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
  */
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
-extern void *__alloc_reserved_percpu(size_t size, size_t align);
-extern void *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void *__pdata);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -142,7 +146,7 @@ extern void __init setup_per_cpu_areas(void);
 
 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static inline void *__alloc_percpu(size_t size, size_t align)
+static inline void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	/*
 	 * Can't easily make larger alignment work with kmalloc.  WARN
@@ -153,7 +157,7 @@ static inline void *__alloc_percpu(size_t size, size_t align)
 	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void free_percpu(void *p)
+static inline void free_percpu(void __percpu *p)
 {
 	kfree(p);
 }
@@ -168,7 +172,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 #endif /* CONFIG_SMP */
 
 #define alloc_percpu(type)	\
-	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
+	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
 /*
  * Optional methods for optimized non-lvalue per-cpu variable access.