path: root/init/main.c
author    Tejun Heo <tj@kernel.org>  2009-03-30 06:07:44 -0400
committer Tejun Heo <tj@kernel.org>  2009-06-24 02:13:35 -0400
commit    e74e396204bfcb67570ba4517b08f5918e69afea (patch)
tree      df57c859e10f7fcbe5790e9b51a106d5bccfe8dc /init/main.c
parent    0017c869ddcb73069905d09f9e98e68627466237 (diff)
percpu: use dynamic percpu allocator as the default percpu allocator
This patch makes most !CONFIG_HAVE_SETUP_PER_CPU_AREA archs use the dynamic
percpu allocator.  The first chunk is allocated using the embedding helper
and 8k is reserved for modules.  This ensures that the new allocator
behaves almost identically to the original allocator as far as static
percpu variables are concerned, so it shouldn't introduce much breakage.

s390 and alpha use a custom SHIFT_PERCPU_PTR() to work around the
addressing range limit their addressing model imposes.  Unfortunately,
this breaks if the address is specified using a variable, so for now
the two archs aren't converted.

The following architectures are affected by this change:

* sh
* arm
* cris
* mips
* sparc(32)
* blackfin
* avr32
* parisc (broken, under investigation)
* m32r
* powerpc(32)

As this change makes the dynamic allocator the default one,
CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is replaced with its inverse,
CONFIG_HAVE_LEGACY_PER_CPU_AREA, which is added to the yet-to-be-converted
archs.  These archs implement their own setup_per_cpu_areas() and the
conversion is not trivial:

* powerpc(64)
* sparc(64)
* ia64
* alpha
* s390

Boot and batch alloc/free tested on x86_32 with debug code (x86_32
doesn't use the default first chunk initialization).  Compile tested on
sparc(32), powerpc(32), arm and alpha.

Kyle McMartin reported that this change breaks parisc.  The problem is
still under investigation, and he is okay with pushing this patch
forward and fixing parisc later.

[ Impact: use dynamic allocator for most archs w/o custom percpu setup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Bryan Wu <cooloney@kernel.org>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
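For context on the compatibility claim above: static percpu variables keep
the same declaration and accessor API whether they are backed by the legacy
bootmem copy or by the dynamic allocator's first chunk.  The sketch below is
illustrative only and not part of the patch; the demo_events counter and the
helpers around it are hypothetical, while DEFINE_PER_CPU(),
get_cpu_var()/put_cpu_var() and per_cpu() are the real accessors of this era.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU counter living in the static percpu section.
 * After this patch that section is served by the dynamic allocator's
 * first chunk, but nothing here has to change. */
static DEFINE_PER_CPU(unsigned long, demo_events);

static void demo_count_event(void)
{
	/* get_cpu_var() disables preemption and returns this CPU's copy */
	get_cpu_var(demo_events)++;
	put_cpu_var(demo_events);
}

static unsigned long demo_total_events(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu() reaches another CPU's copy via __per_cpu_offset[cpu],
	 * the same offset array the legacy code removed below filled in */
	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_events, cpu);
	return sum;
}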
Diffstat (limited to 'init/main.c')
-rw-r--r--  init/main.c  24
1 file changed, 0 insertions(+), 24 deletions(-)
diff --git a/init/main.c b/init/main.c
index 09131ec090c1..602d724afa5c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -357,7 +357,6 @@ static void __init smp_init(void)
 #define smp_init() do { } while (0)
 #endif
 
-static inline void setup_per_cpu_areas(void) { }
 static inline void setup_nr_cpu_ids(void) { }
 static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 
@@ -378,29 +377,6 @@ static void __init setup_nr_cpu_ids(void)
 	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
 }
 
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static void __init setup_per_cpu_areas(void)
-{
-	unsigned long size, i;
-	char *ptr;
-	unsigned long nr_possible_cpus = num_possible_cpus();
-
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-	for_each_possible_cpu(i) {
-		__per_cpu_offset[i] = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		ptr += size;
-	}
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
-
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
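The hunk above removes the legacy fallback: it copied the static percpu
section once per possible CPU out of a single bootmem block and recorded
each copy's offset in __per_cpu_offset[], which the percpu accessors add to
a variable's link-time address.  With the dynamic allocator as the default,
those offsets are produced by the embedding first-chunk helper instead, and
the same chunk machinery can also serve runtime allocations.  A minimal
hypothetical sketch of such a runtime user follows (demo_stats and the
init/exit pair are invented for illustration; alloc_percpu(), per_cpu_ptr()
and free_percpu() are the allocator's actual entry points):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hypothetical runtime user of the now-default dynamic allocator. */
static unsigned long *demo_stats;

static int demo_init(void)
{
	int cpu;

	/* one instance per possible CPU, carved out of a percpu chunk */
	demo_stats = alloc_percpu(unsigned long);
	if (!demo_stats)
		return -ENOMEM;

	/* alloc_percpu() already returns zeroed memory; the loop merely
	 * demonstrates per-CPU addressing through per_cpu_ptr() */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(demo_stats, cpu) = 0;
	return 0;
}

static void demo_exit(void)
{
	free_percpu(demo_stats);
}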