author	Tejun Heo <tj@kernel.org>	2009-03-30 06:07:44 -0400
committer	Tejun Heo <tj@kernel.org>	2009-06-24 02:13:35 -0400
commit	e74e396204bfcb67570ba4517b08f5918e69afea (patch)
tree	df57c859e10f7fcbe5790e9b51a106d5bccfe8dc /include
parent	0017c869ddcb73069905d09f9e98e68627466237 (diff)
percpu: use dynamic percpu allocator as the default percpu allocator
This patch makes most !CONFIG_HAVE_SETUP_PER_CPU_AREA archs use the dynamic percpu allocator. The first chunk is allocated using the embedding helper and 8k is reserved for modules. This ensures that the new allocator behaves almost identically to the original allocator as far as static percpu variables are concerned, so it shouldn't introduce much breakage.

s390 and alpha use a custom SHIFT_PERCPU_PTR() to work around the addressing range limit their addressing model imposes. Unfortunately, this breaks if the address is specified using a variable, so for now those two archs aren't converted.

The following architectures are affected by this change.

* sh
* arm
* cris
* mips
* sparc(32)
* blackfin
* avr32
* parisc (broken, under investigation)
* m32r
* powerpc(32)

As this change makes the dynamic allocator the default one, CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is replaced with its inverse, CONFIG_HAVE_LEGACY_PER_CPU_AREA, which is added to the yet-to-be-converted archs. These archs implement their own setup_per_cpu_areas() and the conversion is not trivial.

* powerpc(64)
* sparc(64)
* ia64
* alpha
* s390

Boot and batch alloc/free tested on x86_32 with debug code (x86_32 doesn't use the default first chunk initialization). Compile tested on sparc(32), powerpc(32), arm and alpha.

Kyle McMartin reported that this change breaks parisc. The problem is still under investigation and he is okay with pushing this patch forward and fixing parisc later.

[ Impact: use dynamic allocator for most archs w/o custom percpu setup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Bryan Wu <cooloney@kernel.org>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
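For reference, the interface declared in include/linux/percpu.h is used the same way against either backing allocator, which is why the switch should be mostly invisible to users. Below is a minimal usage sketch, not part of this patch; the hypothetical_* names are invented for illustration.

#include <linux/module.h>
#include <linux/percpu.h>

/* Static percpu variable: placed in the first chunk, which is now set up
 * by the embedding helper with 8k reserved for module percpu variables. */
static DEFINE_PER_CPU(unsigned long, hypothetical_hits);

/* Dynamic percpu data: after this patch, served by the dynamic allocator
 * on converted archs instead of the kmalloc-based legacy one. */
static unsigned long *hypothetical_counters;

static int __init hypothetical_init(void)
{
	int cpu;

	hypothetical_counters = alloc_percpu(unsigned long);
	if (!hypothetical_counters)
		return -ENOMEM;

	/* per_cpu_ptr() and per_cpu() work identically under either
	 * allocator, so callers need no changes. */
	for_each_possible_cpu(cpu) {
		*per_cpu_ptr(hypothetical_counters, cpu) = 0;
		per_cpu(hypothetical_hits, cpu) = 0;
	}
	return 0;
}

static void __exit hypothetical_exit(void)
{
	free_percpu(hypothetical_counters);
}

module_init(hypothetical_init);
module_exit(hypothetical_exit);
MODULE_LICENSE("GPL");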
Diffstat (limited to 'include')
-rw-r--r--	include/linux/percpu.h	12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 26fd9d12f05..e5000343dd6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,7 +34,7 @@
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE	PFN_ALIGN(64 << 10)
@@ -80,7 +80,7 @@ extern ssize_t __init pcpu_embed_first_chunk(
 
 extern void *__alloc_reserved_percpu(size_t size, size_t align);
 
-#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 struct percpu_data {
 	void *ptrs[1];
@@ -99,11 +99,15 @@ struct percpu_data {
 	(__typeof__(ptr))__p->ptrs[(cpu)];	\
 })
 
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
 
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void __init setup_per_cpu_areas(void);
+#endif
+
 #else /* CONFIG_SMP */
 
 #define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); (ptr); })
@@ -124,6 +128,8 @@ static inline void free_percpu(void *p)
 	kfree(p);
 }
 
+static inline void __init setup_per_cpu_areas(void) { }
+
 #endif /* CONFIG_SMP */
 
 #define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
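The setup_per_cpu_areas() declaration added for !CONFIG_HAVE_SETUP_PER_CPU_AREA implies a generic definition elsewhere in the patch (outside 'include', so not shown in this diff). A rough sketch of what such a default could look like, based on the commit message's description of embedding the first chunk and reserving 8k for modules; the exact pcpu_embed_first_chunk() argument list (its declaration is truncated in the hunk header above), the PERCPU_DYNAMIC_RESERVE constant, and the error handling are assumptions here.

/* Sketch only -- the real definition lives outside include/ in this patch. */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	/* __per_cpu_start/__per_cpu_end are the linker-provided bounds
	 * of the static percpu section. */
	size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t unit_size;
	unsigned long delta;
	unsigned int cpu;

	/* Embed the first chunk in the kernel image, reserving 8k for
	 * module static percpu variables as the commit message describes. */
	unit_size = pcpu_embed_first_chunk(static_size, 8 << 10,
					   PERCPU_DYNAMIC_RESERVE, -1);
	if (unit_size < 0)
		panic("Failed to initialize percpu areas.");

	/* Each CPU's unit sits unit_size bytes after the previous one,
	 * starting at the first chunk's base address. */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + cpu * unit_size;
}
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */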