author    Tejun Heo <tj@kernel.org>    2009-06-21 22:56:24 -0400
committer Tejun Heo <tj@kernel.org>    2009-06-21 22:56:24 -0400
commit    fa8a7094ba1679b4b9b443e0ac9f5e046c79ee8d (patch)
tree      cda9df47b1a84581685d8f4e0cd8ce66cac1d234 /arch/x86/kernel/setup_percpu.c
parent    e59a1bb2fdfb745c685f5b40ffbed126331d3223 (diff)
x86: implement percpu_alloc kernel parameter
According to Andi, it isn't clear whether the lpage allocator is worth the trouble, as there are many processors where the PMD TLB is far scarcer than the PTE TLB. The advantage or disadvantage probably depends on the actual size of the percpu area and the specific processor. As performance degradation due to TLB pressure tends to be highly workload specific and subtle, it is difficult to decide which way to go without more data.

This patch implements the percpu_alloc kernel parameter to allow selecting which first chunk allocator to use, to ease debugging and testing. While at it, make sure all the failure paths report why something failed, to help determine why a certain allocator isn't working. Also, kill the "Great future plan" comment, which had already been realized quite some time ago.

[ Impact: allow explicit percpu first chunk allocator selection ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Jan Beulich <JBeulich@novell.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
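As a usage sketch (the accepted values are taken from the strcmp() checks in the setup_per_cpu_areas() hunk below), the parameter is passed on the kernel command line:

    percpu_alloc=lpage    use the large page allocator
    percpu_alloc=embed    use the embedding allocator
    percpu_alloc=4k       use the 4k page allocator

An unknown value logs a warning, and if an explicitly chosen allocator fails, the code warns and falls back to the 4k allocator.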
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r--  arch/x86/kernel/setup_percpu.c  |  69
1 file changed, 50 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index bad2fd223114..165ebd5ba83b 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -156,20 +156,23 @@ static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
 	return virt_to_page(pcpul_map[cpu].ptr + off);
 }
 
-static ssize_t __init setup_pcpu_lpage(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 {
 	size_t map_size, dyn_size;
 	unsigned int cpu;
 	int i, j;
 	ssize_t ret;
 
-	/*
-	 * If large page isn't supported, there's no benefit in doing
-	 * this.  Also, on non-NUMA, embedding is better.
-	 */
-	if (!cpu_has_pse || !pcpu_need_numa())
+	/* on non-NUMA, embedding is better */
+	if (!chosen && !pcpu_need_numa())
 		return -EINVAL;
 
+	/* need PSE */
+	if (!cpu_has_pse) {
+		pr_warning("PERCPU: lpage allocator requires PSE\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * Currently supports only single page.  Supporting multiple
 	 * pages won't be too difficult if it ever becomes necessary.
@@ -191,8 +194,11 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size)
 		pcpul_map[cpu].cpu = cpu;
 		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
 							PMD_SIZE);
-		if (!pcpul_map[cpu].ptr)
+		if (!pcpul_map[cpu].ptr) {
+			pr_warning("PERCPU: failed to allocate large page "
+				   "for cpu%u\n", cpu);
 			goto enomem;
+		}
 
 		/*
 		 * Only use pcpul_size bytes and give back the rest.
@@ -297,7 +303,7 @@ void *pcpu_lpage_remapped(void *kaddr)
 	return NULL;
 }
 #else
-static ssize_t __init setup_pcpu_lpage(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 {
 	return -EINVAL;
 }
@@ -311,7 +317,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size)
  * mapping so that it can use PMD mapping without additional TLB
  * pressure.
  */
-static ssize_t __init setup_pcpu_embed(size_t static_size)
+static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
 {
 	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 
@@ -320,7 +326,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
 	 * this.  Also, embedding allocation doesn't play well with
 	 * NUMA.
 	 */
-	if (!cpu_has_pse || pcpu_need_numa())
+	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
 		return -EINVAL;
 
 	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
@@ -370,8 +376,11 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 		void *ptr;
 
 		ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
-		if (!ptr)
+		if (!ptr) {
+			pr_warning("PERCPU: failed to allocate "
+				   "4k page for cpu%u\n", cpu);
 			goto enomem;
+		}
 
 		memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
 		pcpu4k_pages[j++] = virt_to_page(ptr);
@@ -395,6 +404,16 @@ out_free_ar:
 	return ret;
 }
 
+/* for explicit first chunk allocator selection */
+static char pcpu_chosen_alloc[16] __initdata;
+
+static int __init percpu_alloc_setup(char *str)
+{
+	strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
+	return 0;
+}
+early_param("percpu_alloc", percpu_alloc_setup);
+
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -408,11 +427,6 @@ static inline void setup_percpu_segment(int cpu)
 #endif
 }
 
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
 void __init setup_per_cpu_areas(void)
 {
 	size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -429,9 +443,26 @@ void __init setup_per_cpu_areas(void)
 	 * of large page mappings.  Please read comments on top of
 	 * each allocator for details.
 	 */
-	ret = setup_pcpu_lpage(static_size);
-	if (ret < 0)
-		ret = setup_pcpu_embed(static_size);
+	ret = -EINVAL;
+	if (strlen(pcpu_chosen_alloc)) {
+		if (strcmp(pcpu_chosen_alloc, "4k")) {
+			if (!strcmp(pcpu_chosen_alloc, "lpage"))
+				ret = setup_pcpu_lpage(static_size, true);
+			else if (!strcmp(pcpu_chosen_alloc, "embed"))
+				ret = setup_pcpu_embed(static_size, true);
+			else
+				pr_warning("PERCPU: unknown allocator %s "
+					   "specified\n", pcpu_chosen_alloc);
+			if (ret < 0)
+				pr_warning("PERCPU: %s allocator failed (%zd), "
+					   "falling back to 4k\n",
+					   pcpu_chosen_alloc, ret);
+		}
+	} else {
+		ret = setup_pcpu_lpage(static_size, false);
+		if (ret < 0)
+			ret = setup_pcpu_embed(static_size, false);
+	}
 	if (ret < 0)
 		ret = setup_pcpu_4k(static_size);
 	if (ret < 0)
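For reference, a compact summary of the selection order implemented by the final hunk (derived from the code above, not part of the commit message):

    percpu_alloc=lpage   ->  setup_pcpu_lpage(); warn and fall back to 4k on failure
    percpu_alloc=embed   ->  setup_pcpu_embed(); warn and fall back to 4k on failure
    percpu_alloc=4k      ->  setup_pcpu_4k() directly
    no parameter         ->  try lpage, then embed, then 4k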