about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Yinghai Lu <yhlu.kernel@gmail.com>   2008-08-19 23:49:44 -0400
committer Ingo Molnar <mingo@elte.hu>          2008-10-16 10:52:03 -0400
commit    1f3fcd4b1adc972d5c6a34cfed98931c46575b49 (patch)
tree      a79b6c656a09a8424863a0025d5b20e7264d6999
parent    3ddfda11861d305b02ed810b522dcf48b74ca808 (diff)
add per_cpu_dyn_array support
Allow a dyn-array in the per_cpu area, allocated dynamically.

Usage:

	/* in .h */
	struct kernel_stat {
		struct cpu_usage_stat	cpustat;
		unsigned int *irqs;
	};

	/* in .c */
	DEFINE_PER_CPU(struct kernel_stat, kstat);

	DEFINE_PER_CPU_DYN_ARRAY_ADDR(per_cpu__kstat_irqs, per_cpu__kstat.irqs,
		sizeof(unsigned int), nr_irqs, sizeof(unsigned long), NULL);

After setup_percpu()/per_cpu_alloc_dyn_array(), the dyn_array in the
per_cpu area is ready to use.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/setup_percpu.c     |  7
-rw-r--r--  include/asm-generic/vmlinux.lds.h  |  6
-rw-r--r--  include/linux/init.h               | 27
-rw-r--r--  init/main.c                        | 63
4 files changed, 96 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0e67f72d9316..13ba7a83808d 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -140,7 +140,7 @@ static void __init setup_cpu_pda_map(void)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size = PERCPU_ENOUGH_ROOM;
+	ssize_t size, old_size;
 	char *ptr;
 	int cpu;
 
@@ -148,7 +148,8 @@ void __init setup_per_cpu_areas(void)
 	setup_cpu_pda_map();
 
 	/* Copy section for each CPU (we discard the original) */
-	size = PERCPU_ENOUGH_ROOM;
+	old_size = PERCPU_ENOUGH_ROOM;
+	size = old_size + per_cpu_dyn_array_size();
 	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
 			  size);
 
@@ -176,6 +177,8 @@ void __init setup_per_cpu_areas(void)
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 
+		per_cpu_alloc_dyn_array(cpu, ptr + old_size);
+
 	}
 
 	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7881406c03ec..c68eda9d9a90 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -216,6 +216,12 @@
 		VMLINUX_SYMBOL(__dyn_array_start) = .;			\
 		*(.dyn_array.init)					\
 		VMLINUX_SYMBOL(__dyn_array_end) = .;			\
+	}								\
+	. = ALIGN((align));						\
+	.per_cpu_dyn_array.init : AT(ADDR(.per_cpu_dyn_array.init) - LOAD_OFFSET) { \
+		VMLINUX_SYMBOL(__per_cpu_dyn_array_start) = .;		\
+		*(.per_cpu_dyn_array.init)				\
+		VMLINUX_SYMBOL(__per_cpu_dyn_array_end) = .;		\
 	}
 #define SECURITY_INIT							\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
diff --git a/include/linux/init.h b/include/linux/init.h
index cf9fa7f174af..332806826b8e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -255,12 +255,13 @@ struct dyn_array {
 	void (*init_work)(void *);
 };
 extern struct dyn_array *__dyn_array_start[], *__dyn_array_end[];
+extern struct dyn_array *__per_cpu_dyn_array_start[], *__per_cpu_dyn_array_end[];
 
-#define DEFINE_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
+#define DEFINE_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
 	static struct dyn_array __dyn_array_##nameX __initdata = \
-		{	.name = (void **)&nameX,\
+		{	.name = (void **)&(nameX),\
 			.size = sizeX,\
-			.nr = &nrX,\
+			.nr = &(nrX),\
 			.align = alignX,\
 			.init_work = init_workX,\
 		}; \
@@ -268,7 +269,27 @@ extern struct dyn_array *__dyn_array_start[], *__dyn_array_end[];
 	__attribute__((__section__(".dyn_array.init"))) = \
 		&__dyn_array_##nameX
 
+#define DEFINE_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
+	DEFINE_DYN_ARRAY_ADDR(nameX, nameX, sizeX, nrX, alignX, init_workX)
+
+#define DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
+	static struct dyn_array __per_cpu_dyn_array_##nameX __initdata = \
+		{	.name = (void **)&(addrX),\
+			.size = sizeX,\
+			.nr = &(nrX),\
+			.align = alignX,\
+			.init_work = init_workX,\
+		}; \
+	static struct dyn_array *__per_cpu_dyn_array_ptr_##nameX __used \
+	__attribute__((__section__(".per_cpu_dyn_array.init"))) = \
+		&__per_cpu_dyn_array_##nameX
+
+#define DEFINE_PER_CPU_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
+	DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, nameX, nrX, alignX, init_workX)
+
 extern void pre_alloc_dyn_array(void);
+extern unsigned long per_cpu_dyn_array_size(void);
+extern void per_cpu_alloc_dyn_array(int cpu, char *ptr);
 #endif /* __ASSEMBLY__ */
 
 /**
diff --git a/init/main.c b/init/main.c
index 638d3a786412..416bca4f734f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -391,17 +391,19 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 static void __init setup_per_cpu_areas(void)
 {
-	unsigned long size, i;
+	unsigned long size, i, old_size;
 	char *ptr;
 	unsigned long nr_possible_cpus = num_possible_cpus();
 
 	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+	old_size = PERCPU_ENOUGH_ROOM;
+	size = ALIGN(old_size + per_cpu_dyn_array_size(), PAGE_SIZE);
 	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
 
 	for_each_possible_cpu(i) {
 		__per_cpu_offset[i] = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+		per_cpu_alloc_dyn_array(i, ptr + old_size);
 		ptr += size;
 	}
 }
@@ -559,6 +561,63 @@ void pre_alloc_dyn_array(void)
 #endif
 }
 
+unsigned long per_cpu_dyn_array_size(void)
+{
+	unsigned long total_size = 0;
+#ifdef CONFIG_HAVE_DYN_ARRAY
+	unsigned long size;
+	struct dyn_array **daa;
+
+	for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) {
+		struct dyn_array *da = *daa;
+
+		size = da->size * (*da->nr);
+		print_fn_descriptor_symbol("per_cpu_dyna_array %s ", da->name);
+		printk(KERN_CONT "size:%#lx nr:%d align:%#lx\n",
+			da->size, *da->nr, da->align);
+		total_size += roundup(size, da->align);
+	}
+	if (total_size)
+		printk(KERN_DEBUG "per_cpu_dyna_array total_size: %#lx\n",
+			 total_size);
+#endif
+	return total_size;
+}
+
+void per_cpu_alloc_dyn_array(int cpu, char *ptr)
+{
+#ifdef CONFIG_HAVE_DYN_ARRAY
+	unsigned long size, phys;
+	struct dyn_array **daa;
+	unsigned long addr;
+	void **array;
+
+	phys = virt_to_phys(ptr);
+
+	for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) {
+		struct dyn_array *da = *daa;
+
+		size = da->size * (*da->nr);
+		print_fn_descriptor_symbol("per_cpu_dyna_array %s ", da->name);
+		printk(KERN_CONT "size:%#lx nr:%d align:%#lx",
+			da->size, *da->nr, da->align);
+
+		phys = roundup(phys, da->align);
+		addr = (unsigned long)da->name;
+		addr += per_cpu_offset(cpu);
+		array = (void **)addr;
+		*array = phys_to_virt(phys);
+		*da->name = *array; /* so init_work could use it directly */
+		printk(KERN_CONT " %p ==> [%#lx - %#lx]\n", array, phys, phys + size);
+		phys += size;
+
+		if (da->init_work) {
+			da->init_work(da);
+		}
+	}
+#endif
+}
+
 asmlinkage void __init start_kernel(void)
 {
 	char * command_line;