author		Tejun Heo <tj@kernel.org>	2010-03-24 04:06:43 -0400
committer	Tejun Heo <tj@kernel.org>	2010-03-30 09:02:32 -0400
commit		de380b55f92986c1a84198149cb71b7228d15fbd (patch)
tree		dce7168802e1e65754c9b6455d0527dfa853168c /include/linux/percpu.h
parent		ea5a9f0c3447889abceb7482c391bb977472eab9 (diff)
percpu: don't implicitly include slab.h from percpu.h
percpu.h has always included slab.h to get k[mz]alloc/free() for the UP
inline implementation.  Because percpu.h is used by very low level
headers, including module.h and sched.h, a lot of files unintentionally
got slab.h included as well.  Lee Schermerhorn was trying to make
topology.h use percpu.h and got bitten by this implicit inclusion.

The right thing to do is to break this ultimately unnecessary
dependency.  The previous patch added explicit inclusion of either
gfp.h or slab.h to the source files that use them.  This patch updates
percpu.h so that slab.h is no longer included from percpu.h.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
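As a hedged illustration of the fallout (hypothetical file; struct foo, foo_create() and foo_destroy() are invented for this sketch, not part of the commit): a file that called kzalloc()/kfree() while only including percpu.h compiled by accident before this change, and afterwards must include slab.h explicitly, which is what the previous patch in the series did tree-wide.

/*
 * Hypothetical example, not from this commit.  Before the change,
 * including percpu.h implicitly dragged in slab.h, so the
 * kzalloc()/kfree() calls below compiled by accident.  After the
 * change, the explicit slab.h include is required.
 */
#include <linux/percpu.h>
#include <linux/slab.h>		/* now required explicitly for kzalloc()/kfree() */

struct foo {
	int bar;
};

static struct foo *foo_create(void)
{
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

static void foo_destroy(struct foo *f)
{
	kfree(f);
}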
Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--	include/linux/percpu.h	30
1 file changed, 5 insertions(+), 25 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index a93e5bfdccb8..c7845130bfdf 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -2,10 +2,10 @@
 #define __LINUX_PERCPU_H
 
 #include <linux/preempt.h>
-#include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/pfn.h>
+#include <linux/init.h>
 
 #include <asm/percpu.h>
 
@@ -135,9 +135,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void __percpu *__pdata);
-extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -147,27 +144,6 @@ extern void __init setup_per_cpu_areas(void);
 
 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static inline void __percpu *__alloc_percpu(size_t size, size_t align)
-{
-	/*
-	 * Can't easily make larger alignment work with kmalloc.  WARN
-	 * on it.  Larger alignment should only be used for module
-	 * percpu sections on SMP for which this path isn't used.
-	 */
-	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-	return kzalloc(size, GFP_KERNEL);
-}
-
-static inline void free_percpu(void __percpu *p)
-{
-	kfree(p);
-}
-
-static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
-{
-	return __pa(addr);
-}
-
 static inline void __init setup_per_cpu_areas(void) { }
 
 static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -177,6 +153,10 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 
 #endif /* CONFIG_SMP */
 
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
 #define alloc_percpu(type)	\
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
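For context, a short usage sketch (hypothetical module code; struct hit_counter and the function names are invented, not from this commit): with the __alloc_percpu()/free_percpu()/per_cpu_ptr_to_phys() declarations moved below the #endif, they are visible on both SMP and UP builds, so callers of the type-safe alloc_percpu() macro build unchanged either way.

/*
 * Hypothetical usage, not from this commit.  alloc_percpu(type)
 * expands to __alloc_percpu(sizeof(type), __alignof__(type)) and
 * yields a type-safe __percpu pointer on both SMP and UP.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/percpu.h>

struct hit_counter {
	unsigned long hits;
};

static struct hit_counter __percpu *counters;

static int __init counters_init(void)
{
	counters = alloc_percpu(struct hit_counter);
	if (!counters)
		return -ENOMEM;
	return 0;
}

static unsigned long counters_total(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu_ptr() selects each cpu's copy (the only copy on UP) */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(counters, cpu)->hits;
	return sum;
}

static void counters_exit(void)
{
	free_percpu(counters);
}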