author	Eric W. Biederman <ebiederm@xmission.com>	2006-09-26 04:52:35 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2006-09-26 04:52:35 -0400
commit	ba4d40bb5c465f0a4dcc30d02dab80c2cb7e1ff3 (patch)
tree	12b10f26f5efddb15ef8a75a1eff38771334365d
parent	522e93e3fcdbf00ba85c72fde6df28cfc0486a65 (diff)
[PATCH] Auto size the per cpu area.
Now for a completely different but trivial approach.  I just boot
tested it with 255 CPUS and everything worked.

Currently everything (except module data) we place in the per cpu
area we know about at compile time.  So instead of allocating a fixed
size for the per_cpu area, allocate the number of bytes we need plus
a fixed constant to be used for modules.

It isn't perfect but it is much less of a pain to work with than what
we are doing now.

AK: fixed warning

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andi Kleen <ak@suse.de>
-rw-r--r--  arch/x86_64/kernel/setup64.c |  7 ++-----
-rw-r--r--  include/asm-x86_64/percpu.h  | 10 ++++++++++
2 files changed, 12 insertions(+), 5 deletions(-)
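To make the sizing concrete, here is a small userspace sketch of the arithmetic that the new PERCPU_ENOUGH_ROOM macro performs; the section size, cache line size, and the ALIGN definition below are stand-ins for the kernel's linker-provided symbols and headers, not values taken from this patch:

#include <stdio.h>

/* Stand-in values: in the kernel the section size comes from the
 * linker symbols __per_cpu_start/__per_cpu_end and the alignment
 * from SMP_CACHE_BYTES. */
#define SMP_CACHE_BYTES        128
#define PER_CPU_SECTION_BYTES  5000    /* hypothetical .data.percpu size */
#define PERCPU_MODULE_RESERVE  8192    /* CONFIG_MODULES=y */

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

#define PERCPU_ENOUGH_ROOM \
	(ALIGN(PER_CPU_SECTION_BYTES, SMP_CACHE_BYTES) + PERCPU_MODULE_RESERVE)

int main(void)
{
	/* 5000 rounds up to 5120 on a 128-byte boundary; adding the
	 * 8192-byte module reserve gives 13312 bytes per possible CPU. */
	printf("per cpu area size: %lu bytes\n",
	       (unsigned long)PERCPU_ENOUGH_ROOM);
	return 0;
}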
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index b09e60fa96b4..e85cfbb49b63 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -95,12 +95,9 @@ void __init setup_per_cpu_areas(void)
 #endif
 
 	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-#ifdef CONFIG_MODULES
-	if (size < PERCPU_ENOUGH_ROOM)
-		size = PERCPU_ENOUGH_ROOM;
-#endif
+	size = PERCPU_ENOUGH_ROOM;
 
+	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
 	for_each_cpu_mask (i, cpu_possible_map) {
 		char *ptr;
 
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 08dd9f9dda81..39d2bab9b520 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -11,6 +11,16 @@
 
 #include <asm/pda.h>
 
+#ifdef CONFIG_MODULES
+# define PERCPU_MODULE_RESERVE 8192
+#else
+# define PERCPU_MODULE_RESERVE 0
+#endif
+
+#define PERCPU_ENOUGH_ROOM \
+	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+	 PERCPU_MODULE_RESERVE)
+
 #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
 #define __my_cpu_offset() read_pda(data_offset)
 
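The setup64.c hunk shows only the head of setup_per_cpu_areas(); for context, a minimal sketch of the loop body that consumes the new size follows. The allocator and helper names mirror the x86_64 code of that era and are assumptions here, not part of this patch. The point is that each possible CPU receives the full PERCPU_ENOUGH_ROOM bytes, but only the compile-time .data.percpu contents are copied in, leaving the trailing PERCPU_MODULE_RESERVE bytes free for per cpu variables added later by modules.

/* Sketch only: alloc_bootmem_node, NODE_DATA, cpu_to_node and cpu_pda
 * are assumed from the contemporaneous kernel; they do not appear in
 * this diff. */
for_each_cpu_mask (i, cpu_possible_map) {
	char *ptr;

	ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
	if (!ptr)
		panic("Cannot allocate cpu data for CPU %d\n", i);

	/* Record where this CPU's copy lives ... */
	cpu_pda(i)->data_offset = ptr - __per_cpu_start;
	/* ... and initialize only the statically known portion; the
	 * remainder of the block is the module reserve. */
	memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}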