author	Tejun Heo <tj@kernel.org>	2009-02-20 02:29:09 -0500
committer	Tejun Heo <tj@kernel.org>	2009-02-20 02:29:09 -0500
commit	11124411aa95827404d6bfdfc14c908e1b54513c (patch)
tree	e613edf6df9d1130e0ebe853ba1390c8389b25b3
parent	fbf59bc9d74d1fb30b8e0630743aff2806eafcea (diff)
x86: convert to the new dynamic percpu allocator
Impact: use new dynamic allocator, unified access to static/dynamic percpu memory

Convert to the new dynamic percpu allocator.

* implement populate_extra_pte() for both 32 and 64
* update setup_per_cpu_areas() to use pcpu_setup_static()
* define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr()
* define config HAVE_DYNAMIC_PER_CPU_AREA

Signed-off-by: Tejun Heo <tj@kernel.org>
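[Editor's note: for orientation, a minimal sketch, not part of this patch, of what "unified access to static/dynamic percpu memory" means for callers: memory obtained from the dynamic allocator is dereferenced with the same per_cpu_ptr() accessor used for DEFINE_PER_CPU data. It assumes the generic alloc_percpu()/free_percpu() interface; the function and variable names are hypothetical.]

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/errno.h>

	/* illustrative only: a dynamically allocated percpu counter */
	static unsigned long *pcpu_counters;

	static int __init percpu_demo_init(void)	/* hypothetical name */
	{
		int cpu;

		pcpu_counters = alloc_percpu(unsigned long);
		if (!pcpu_counters)
			return -ENOMEM;

		/* the same accessor works for static and dynamic percpu memory */
		for_each_possible_cpu(cpu)
			*per_cpu_ptr(pcpu_counters, cpu) = 0;

		free_percpu(pcpu_counters);
		return 0;
	}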
-rw-r--r--	arch/x86/Kconfig	3
-rw-r--r--	arch/x86/include/asm/percpu.h	8
-rw-r--r--	arch/x86/include/asm/pgtable.h	1
-rw-r--r--	arch/x86/kernel/setup_percpu.c	62
-rw-r--r--	arch/x86/mm/init_32.c	10
-rw-r--r--	arch/x86/mm/init_64.c	19
6 files changed, 81 insertions(+), 22 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f760a22f95dc..d3f6eadfd4ba 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -135,6 +135,9 @@ config ARCH_HAS_CACHE_LINE_SIZE
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
+config HAVE_DYNAMIC_PER_CPU_AREA
+	def_bool y
+
 config HAVE_CPUMASK_OF_CPU_MAP
 	def_bool X86_64_SMP
 
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d01..8f1d2fbec1d4 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -43,6 +43,14 @@
 #else /* ...!ASSEMBLY */
 
 #include <linux/stringify.h>
+#include <asm/sections.h>
+
+#define __addr_to_pcpu_ptr(addr)					\
+	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
+		 + (unsigned long)__per_cpu_start)
+#define __pcpu_ptr_to_addr(ptr)						\
+	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
+		 - (unsigned long)__per_cpu_start)
 
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 6f7c102018bf..dd91c2515c64 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -402,6 +402,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 
 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+void populate_extra_pte(unsigned long vaddr);
 
 #ifdef CONFIG_X86_32
 extern void native_pagetable_setup_start(pgd_t *base);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index d992e6cff730..2dce43558217 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -61,38 +61,56 @@ static inline void setup_percpu_segment(int cpu)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size;
-	char *ptr;
-	int cpu;
-
-	/* Copy section for each CPU (we discard the original) */
-	size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+	ssize_t size = __per_cpu_end - __per_cpu_start;
+	unsigned int nr_cpu_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+	static struct page **pages;
+	size_t pages_size;
+	unsigned int cpu, i, j;
+	unsigned long delta;
+	size_t pcpu_unit_size;
 
 	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
+	pr_info("PERCPU: Allocating %zd bytes for static per cpu data\n", size);
 
-	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
+	pages_size = nr_cpu_pages * num_possible_cpus() * sizeof(pages[0]);
+	pages = alloc_bootmem(pages_size);
 
+	j = 0;
 	for_each_possible_cpu(cpu) {
+		void *ptr;
+
+		for (i = 0; i < nr_cpu_pages; i++) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
+			ptr = alloc_bootmem_pages(PAGE_SIZE);
 #else
-		int node = early_cpu_to_node(cpu);
-		if (!node_online(node) || !NODE_DATA(node)) {
-			ptr = alloc_bootmem_pages(size);
-			pr_info("cpu %d has no node %d or node-local memory\n",
-				cpu, node);
-			pr_debug("per cpu data for cpu%d at %016lx\n",
-				 cpu, __pa(ptr));
-		} else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
-			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
-				 cpu, node, __pa(ptr));
-		}
+			int node = early_cpu_to_node(cpu);
+
+			if (!node_online(node) || !NODE_DATA(node)) {
+				ptr = alloc_bootmem_pages(PAGE_SIZE);
+				pr_info("cpu %d has no node %d or node-local "
+					"memory\n", cpu, node);
+				pr_debug("per cpu data for cpu%d at %016lx\n",
+					 cpu, __pa(ptr));
+			} else {
+				ptr = alloc_bootmem_pages_node(NODE_DATA(node),
+							       PAGE_SIZE);
+				pr_debug("per cpu data for cpu%d on node%d "
+					 "at %016lx\n", cpu, node, __pa(ptr));
+			}
 #endif
+			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
+			pages[j++] = virt_to_page(ptr);
+		}
+	}
+
+	pcpu_unit_size = pcpu_setup_static(populate_extra_pte, pages, size);
 
-	memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
-	per_cpu_offset(cpu) = ptr - __per_cpu_start;
+	free_bootmem(__pa(pages), pages_size);
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu) {
+		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 00263bf07a88..8b1a0ef7f874 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -137,6 +137,16 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+void __init populate_extra_pte(unsigned long vaddr)
+{
+	int pgd_idx = pgd_index(vaddr);
+	int pmd_idx = pmd_index(vaddr);
+	pmd_t *pmd;
+
+	pmd = one_md_table_init(swapper_pg_dir + pgd_idx);
+	one_page_table_init(pmd + pmd_idx);
+}
+
 static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
 					   unsigned long vaddr, pte_t *lastpte)
 {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e6d36b490250..7f91e2cdc4ce 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -223,6 +223,25 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 	set_pte_vaddr_pud(pud_page, vaddr, pteval);
 }
 
+void __init populate_extra_pte(unsigned long vaddr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+
+	pgd = pgd_offset_k(vaddr);
+	if (pgd_none(*pgd)) {
+		pud = (pud_t *)spp_getpage();
+		pgd_populate(&init_mm, pgd, pud);
+		if (pud != pud_offset(pgd, 0)) {
+			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+			       pud, pud_offset(pgd, 0));
+			return;
+		}
+	}
+
+	set_pte_vaddr_pud((pud_t *)pgd_page_vaddr(*pgd), vaddr, __pte(0));
+}
+
 /*
  * Create large page table mappings for a range of physical addresses.
  */