author    Tejun Heo <tj@kernel.org>  2009-06-21 22:56:24 -0400
committer Tejun Heo <tj@kernel.org>  2009-06-21 22:56:24 -0400
commit    97c9bf0618cd40b05b4859c1f8a90d8ad97fefb2
tree      0783d59b58fef4c1717aac1b2402c68beedb3986 /arch/x86
parent    c5806df9232d2a7f554b4839b57cac2e664fc256
x86: rename remap percpu first chunk allocator to lpage
The "remap" allocator remaps large pages to build the first chunk;
however, the name isn't very good because the 4k allocator remaps too,
and the whole point of the remap allocator is its use of large page
mappings.  The allocator will be generalized and exported outside of
x86, so rename it to lpage before that happens.

The percpu_alloc kernel parameter is updated to accept both "remap"
and "lpage" for the lpage allocator.

[ Impact: code cleanup, kernel parameter argument updated ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
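For illustration, parameter handling along these lines would let either
string select the large page allocator.  This is a minimal sketch, not
the tree's actual code: percpu_alloc_setup, pcpu_chosen_alloc, and the
PCPU_ALLOC_* values are hypothetical names introduced here; only
early_param() is the real kernel hook for early boot parameters.

/* Illustrative selector; the tree's actual representation may differ. */
enum { PCPU_ALLOC_AUTO, PCPU_ALLOC_LPAGE, PCPU_ALLOC_EMBED, PCPU_ALLOC_4K };
static int pcpu_chosen_alloc __initdata = PCPU_ALLOC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;
	/* "remap" (the old name) and "lpage" (the new name) are synonyms */
	if (!strcmp(str, "lpage") || !strcmp(str, "remap"))
		pcpu_chosen_alloc = PCPU_ALLOC_LPAGE;
	else if (!strcmp(str, "embed"))
		pcpu_chosen_alloc = PCPU_ALLOC_EMBED;
	else if (!strcmp(str, "4k"))
		pcpu_chosen_alloc = PCPU_ALLOC_4K;
	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);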
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 50 +++++++++++++++++++++++++-------------------------
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index dfbc7e6c64d4..8794c0c94d2c 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -124,7 +124,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 }
 
 /*
- * Remap allocator
+ * Large page remap allocator
  *
  * This allocator uses PMD page as unit. A PMD page is allocated for
  * each cpu and each is remapped into vmalloc area using PMD mapping.
@@ -137,20 +137,20 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
  * better than only using 4k mappings while still being NUMA friendly.
  */
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
+static size_t pcpul_size __initdata;
+static void **pcpul_ptrs __initdata;
 
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
 {
 	size_t off = (size_t)pageno << PAGE_SHIFT;
 
-	if (off >= pcpur_size)
+	if (off >= pcpul_size)
 		return NULL;
 
-	return virt_to_page(pcpur_ptrs[cpu] + off);
+	return virt_to_page(pcpul_ptrs[cpu] + off);
 }
 
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size)
 {
 	static struct vm_struct vm;
 	size_t ptrs_size, dyn_size;
@@ -170,36 +170,36 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 	 * Currently supports only single page. Supporting multiple
 	 * pages won't be too difficult if it ever becomes necessary.
 	 */
-	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
 			       PERCPU_DYNAMIC_RESERVE);
-	if (pcpur_size > PMD_SIZE) {
+	if (pcpul_size > PMD_SIZE) {
 		pr_warning("PERCPU: static data is larger than large page, "
 			   "can't use large page\n");
 		return -EINVAL;
 	}
-	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
+	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
 	/* allocate pointer array and alloc large pages */
-	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
-	pcpur_ptrs = alloc_bootmem(ptrs_size);
+	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_ptrs[0]));
+	pcpul_ptrs = alloc_bootmem(ptrs_size);
 
 	for_each_possible_cpu(cpu) {
-		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
-		if (!pcpur_ptrs[cpu])
+		pcpul_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
+		if (!pcpul_ptrs[cpu])
 			goto enomem;
 
 		/*
-		 * Only use pcpur_size bytes and give back the rest.
+		 * Only use pcpul_size bytes and give back the rest.
 		 *
 		 * Ingo: The 2MB up-rounding bootmem is needed to make
 		 * sure the partial 2MB page is still fully RAM - it's
 		 * not well-specified to have a PAT-incompatible area
 		 * (unmapped RAM, device memory, etc.) in that hole.
 		 */
-		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
-			     PMD_SIZE - pcpur_size);
+		free_bootmem(__pa(pcpul_ptrs[cpu] + pcpul_size),
+			     PMD_SIZE - pcpul_size);
 
-		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+		memcpy(pcpul_ptrs[cpu], __per_cpu_load, static_size);
 	}
 
 	/* allocate address and map */
@@ -212,7 +212,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 
 		pmd = populate_extra_pmd((unsigned long)vm.addr
 					 + cpu * PMD_SIZE);
-		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
+		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpul_ptrs[cpu])),
 				     PAGE_KERNEL_LARGE));
 	}
 
@@ -220,22 +220,22 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 	pr_info("PERCPU: Remapped at %p with large pages, static data "
 		"%zu bytes\n", vm.addr, static_size);
 
-	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
 				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
 				     PMD_SIZE, vm.addr, NULL);
 	goto out_free_ar;
 
 enomem:
 	for_each_possible_cpu(cpu)
-		if (pcpur_ptrs[cpu])
-			free_bootmem(__pa(pcpur_ptrs[cpu]), pcpur_size);
+		if (pcpul_ptrs[cpu])
+			free_bootmem(__pa(pcpul_ptrs[cpu]), pcpul_size);
 	ret = -ENOMEM;
 out_free_ar:
-	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+	free_bootmem(__pa(pcpul_ptrs), ptrs_size);
 	return ret;
 }
 #else
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size)
 {
 	return -EINVAL;
 }
@@ -367,7 +367,7 @@ void __init setup_per_cpu_areas(void)
 	 * of large page mappings. Please read comments on top of
 	 * each allocator for details.
 	 */
-	ret = setup_pcpu_remap(static_size);
+	ret = setup_pcpu_lpage(static_size);
 	if (ret < 0)
 		ret = setup_pcpu_embed(static_size);
 	if (ret < 0)
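For context on the final hunk: setup_per_cpu_areas() tries the first
chunk allocators from most to least preferable, falling back on failure.
Note also the arithmetic in the allocation loop above: each cpu gets a
full PMD_SIZE (2MB on x86) bootmem block aligned to PMD_SIZE, but only
pcpul_size bytes are kept; the PMD_SIZE - pcpul_size tail is returned
via free_bootmem() once the whole large page is known to be RAM-backed.
A minimal sketch of the fallback pattern follows; the setup_pcpu_4k()
step and the panic are assumptions about the rest of the function,
which this diff does not show.

	ret = setup_pcpu_lpage(static_size);
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);	/* assumed final fallback */
	if (ret < 0)
		panic("cannot initialize percpu area (err=%zd)", ret);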