author		Tejun Heo <tj@kernel.org>	2010-03-24 04:06:43 -0400
committer	Tejun Heo <tj@kernel.org>	2010-03-30 09:02:32 -0400
commit		de380b55f92986c1a84198149cb71b7228d15fbd
tree		dce7168802e1e65754c9b6455d0527dfa853168c
parent		ea5a9f0c3447889abceb7482c391bb977472eab9
percpu: don't implicitly include slab.h from percpu.h
percpu.h has always been including slab.h to get k[mz]alloc/free() for the
UP inline implementation.  Because percpu.h is used by very low level
headers including module.h and sched.h, this meant that a lot of files
unintentionally got slab.h inclusion.

Lee Schermerhorn was trying to make topology.h use percpu.h and got bitten
by this implicit inclusion.  The right thing to do is break this
ultimately unnecessary dependency.  The previous patch added explicit
inclusion of either gfp.h or slab.h to the source files using them.  This
patch updates percpu.h such that slab.h is no longer included from
percpu.h.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
-rw-r--r--  include/linux/percpu.h  30
-rw-r--r--  mm/Makefile              6
-rw-r--r--  mm/percpu_up.c          30
3 files changed, 40 insertions(+), 26 deletions(-)
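Note: as the commit message above explains, any .c file that had been getting kmalloc()/kzalloc() for free through percpu.h now needs its own slab.h (or gfp.h) include. A minimal sketch of what such a fixup looks like, using a hypothetical file and function names that are not part of this patch:

/* hypothetical-example.c: built before only because <linux/percpu.h>
 * happened to drag in <linux/slab.h>; the include must now be explicit. */
#include <linux/percpu.h>
#include <linux/slab.h>		/* for kzalloc()/kfree(), no longer implicit */

static void *example_alloc_state(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}

static void example_free_state(void *state)
{
	kfree(state);
}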
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index a93e5bfdccb8..c7845130bfdf 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -2,10 +2,10 @@
 #define __LINUX_PERCPU_H
 
 #include <linux/preempt.h>
-#include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/pfn.h>
+#include <linux/init.h>
 
 #include <asm/percpu.h>
 
@@ -135,9 +135,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void __percpu *__pdata);
-extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -147,27 +144,6 @@ extern void __init setup_per_cpu_areas(void);
 
 #define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); (ptr); })
 
-static inline void __percpu *__alloc_percpu(size_t size, size_t align)
-{
-	/*
-	 * Can't easily make larger alignment work with kmalloc.  WARN
-	 * on it.  Larger alignment should only be used for module
-	 * percpu sections on SMP for which this path isn't used.
-	 */
-	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-	return kzalloc(size, GFP_KERNEL);
-}
-
-static inline void free_percpu(void __percpu *p)
-{
-	kfree(p);
-}
-
-static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
-{
-	return __pa(addr);
-}
-
 static inline void __init setup_per_cpu_areas(void) { }
 
 static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -177,6 +153,10 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 
 #endif /* CONFIG_SMP */
 
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
 #define alloc_percpu(type)	\
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
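With __alloc_percpu(), free_percpu() and per_cpu_ptr_to_phys() declared outside the CONFIG_SMP branch, callers see a single API on both UP and SMP; only the backing implementation differs (mm/percpu.c vs. mm/percpu_up.c). A hedged usage sketch with invented names, not taken from this patch:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* hypothetical per-CPU statistics block */
struct example_stats {
	u64 hits;
	u64 misses;
};

static struct example_stats __percpu *example_stats;

static int example_init(void)
{
	/* identical call on SMP and UP; UP builds resolve it in mm/percpu_up.c */
	example_stats = alloc_percpu(struct example_stats);
	if (!example_stats)
		return -ENOMEM;
	return 0;
}

static void example_exit(void)
{
	free_percpu(example_stats);
}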
diff --git a/mm/Makefile b/mm/Makefile
index 7a68d2ab5560..6c2a73a54a43 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -33,7 +33,11 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-obj-$(CONFIG_SMP) += percpu.o
+ifdef CONFIG_SMP
+obj-y += percpu.o
+else
+obj-y += percpu_up.o
+endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/percpu_up.c b/mm/percpu_up.c
new file mode 100644
index 000000000000..c4351c7f57d2
--- /dev/null
+++ b/mm/percpu_up.c
@@ -0,0 +1,30 @@
+/*
+ * mm/percpu_up.c - dummy percpu memory allocator implementation for UP
+ */
+
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+void __percpu *__alloc_percpu(size_t size, size_t align)
+{
+	/*
+	 * Can't easily make larger alignment work with kmalloc.  WARN
+	 * on it.  Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+	return kzalloc(size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(__alloc_percpu);
+
+void free_percpu(void __percpu *p)
+{
+	kfree(p);
+}
+EXPORT_SYMBOL_GPL(free_percpu);
+
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	return __pa(addr);
+}
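For completeness, a small sketch (invented names, assuming a UP build) of how these UP pieces compose: per_cpu_ptr() is an identity macro on UP, so the pointer returned by the kzalloc()-backed __alloc_percpu() above is itself the one and only "per-CPU" instance, and per_cpu_ptr_to_phys() can simply hand it to __pa():

#include <linux/percpu.h>
#include <linux/smp.h>

static unsigned long __percpu *example_counter;	/* hypothetical */

static void example_bump(void)
{
	/*
	 * On UP, per_cpu_ptr(ptr, cpu) expands to ({ (void)(cpu); (ptr); }),
	 * so this dereferences the block kzalloc()ed by __alloc_percpu().
	 */
	(*per_cpu_ptr(example_counter, raw_smp_processor_id()))++;
}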