about summary refs log tree commit diff stats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2009-08-14 02:00:53 -0400
committerTejun Heo <tj@kernel.org>2009-08-14 02:00:53 -0400
commitc2a7e818019f20a5cf7fb26a6eb59e212e6c0cd8 (patch)
tree0cbd22be697e3f3df7c364540670d4f7622f34ea /arch/powerpc
parentbcb2107fdbecef3de55d597d23453747af81ba88 (diff)
powerpc64: convert to dynamic percpu allocator
Now that percpu allows arbitrary embedding of the first chunk, powerpc64 can easily be converted to dynamic percpu allocator. Convert it. powerpc supports several large page sizes. Cap atom_size at 1M. There isn't much to gain by going above that anyway. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/kernel/setup_64.c61
2 files changed, 47 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 61bbffa2fe60..2c42e1526d03 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -46,10 +46,10 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
46 bool 46 bool
47 default y 47 default y
48 48
49config HAVE_LEGACY_PER_CPU_AREA 49config HAVE_SETUP_PER_CPU_AREA
50 def_bool PPC64 50 def_bool PPC64
51 51
52config HAVE_SETUP_PER_CPU_AREA 52config NEED_PER_CPU_EMBED_FIRST_CHUNK
53 def_bool PPC64 53 def_bool PPC64
54 54
55config IRQ_PER_CPU 55config IRQ_PER_CPU
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1f6816003ebe..aa6e4500635f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -57,6 +57,7 @@
57#include <asm/cache.h> 57#include <asm/cache.h>
58#include <asm/page.h> 58#include <asm/page.h>
59#include <asm/mmu.h> 59#include <asm/mmu.h>
60#include <asm/mmu-hash64.h>
60#include <asm/firmware.h> 61#include <asm/firmware.h>
61#include <asm/xmon.h> 62#include <asm/xmon.h>
62#include <asm/udbg.h> 63#include <asm/udbg.h>
@@ -569,25 +570,53 @@ void cpu_die(void)
569} 570}
570 571
571#ifdef CONFIG_SMP 572#ifdef CONFIG_SMP
572void __init setup_per_cpu_areas(void) 573#define PCPU_DYN_SIZE ()
574
575static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
573{ 576{
574 int i; 577 return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
575 unsigned long size; 578 __pa(MAX_DMA_ADDRESS));
576 char *ptr; 579}
577
578 /* Copy section for each CPU (we discard the original) */
579 size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
580#ifdef CONFIG_MODULES
581 if (size < PERCPU_ENOUGH_ROOM)
582 size = PERCPU_ENOUGH_ROOM;
583#endif
584 580
585 for_each_possible_cpu(i) { 581static void __init pcpu_fc_free(void *ptr, size_t size)
586 ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size); 582{
583 free_bootmem(__pa(ptr), size);
584}
587 585
588 paca[i].data_offset = ptr - __per_cpu_start; 586static int pcpu_cpu_distance(unsigned int from, unsigned int to)
589 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 587{
590 } 588 if (cpu_to_node(from) == cpu_to_node(to))
589 return LOCAL_DISTANCE;
590 else
591 return REMOTE_DISTANCE;
592}
593
594void __init setup_per_cpu_areas(void)
595{
596 const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
597 size_t atom_size;
598 unsigned long delta;
599 unsigned int cpu;
600 int rc;
601
602 /*
603 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
604 * to group units. For larger mappings, use 1M atom which
605 * should be large enough to contain a number of units.
606 */
607 if (mmu_linear_psize == MMU_PAGE_4K)
608 atom_size = PAGE_SIZE;
609 else
610 atom_size = 1 << 20;
611
612 rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
613 pcpu_fc_alloc, pcpu_fc_free);
614 if (rc < 0)
615 panic("cannot initialize percpu area (err=%d)", rc);
616
617 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
618 for_each_possible_cpu(cpu)
619 paca[cpu].data_offset = delta + pcpu_unit_offsets[cpu];
591} 620}
592#endif 621#endif
593 622