author    Mike Travis <travis@sgi.com>    2008-05-12 15:21:13 -0400
committer Ingo Molnar <mingo@elte.hu>     2008-07-08 05:31:25 -0400
commit    3461b0af025251bbc6b3d56c821c6ac2de6f7209 (patch)
tree      5350ad712311a69b4b59deec1ec60d8573db8817 /arch/x86/kernel/setup.c
parent    9f248bde9d47cc177011198c9a15fb339b9f3215 (diff)
x86: remove static boot_cpu_pda array v2
* Remove the boot_cpu_pda array and pointer table from the data section.
  Allocate the pointer table and array during init.  do_boot_cpu()
  will reallocate the pda in node local memory and if the cpu is being
  brought up before the bootmem array is released (after_bootmem = 0),
  then it will free the initial pda.  This will happen for all cpus
  present at system startup.

  This removes 512k + 32k bytes from the data section.

For inclusion into sched-devel/latest tree.

Based on: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
    + sched-devel/latest .../mingo/linux-2.6-sched-devel.git

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
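
The single bootmem allocation added below packs the cpu_pda pointer table and the
pda array into one block, with the boot cpu keeping its statically allocated pda
(hence the "nr_cpu_ids - 1" in the array size). The following stand-alone C sketch
only illustrates that size arithmetic; the cache line size, pda size and cpu count
are assumed example values, not anything taken from the patch:

/*
 * Stand-alone illustration of the setup_cpu_pda_map() layout:
 * one bootmem block = cache-line aligned pointer table, followed by
 * (nr_cpu_ids - 1) cache-line aligned pdas.  All sizes below are
 * assumed example values, not taken from the kernel.
 */
#include <stdio.h>

#define CACHE_LINE	64	/* assumed cache_line_size() */
#define PDA_SIZE	128	/* assumed sizeof(struct x8664_pda) */
#define NR_CPU_IDS	8	/* assumed number of possible cpus */

static unsigned long roundup_to(unsigned long v, unsigned long align)
{
	return (v + align - 1) / align * align;
}

int main(void)
{
	unsigned long size  = roundup_to(PDA_SIZE, CACHE_LINE);
	unsigned long tsize = roundup_to(NR_CPU_IDS * sizeof(void *), CACHE_LINE);
	unsigned long asize = size * (NR_CPU_IDS - 1);	/* boot cpu keeps its static pda */

	printf("one allocation of %lu bytes: %lu-byte table + %lu-byte pda array\n",
	       tsize + asize, tsize, asize);
	printf("cpu N (N > 0) gets the pda at offset %lu + (N - 1) * %lu\n",
	       tsize, size);
	return 0;
}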
Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r--  arch/x86/kernel/setup.c  73
1 file changed, 57 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 913af838c3c5..dd12c1c84a8f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -101,6 +101,50 @@ static inline void setup_cpumask_of_cpu(void) { }
  */
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(__per_cpu_offset);
+static inline void setup_cpu_pda_map(void) { }
+
+#elif !defined(CONFIG_SMP)
+static inline void setup_cpu_pda_map(void) { }
+
+#else /* CONFIG_SMP && CONFIG_X86_64 */
+
+/*
+ * Allocate cpu_pda pointer table and array via alloc_bootmem.
+ */
+static void __init setup_cpu_pda_map(void)
+{
+	char *pda;
+	struct x8664_pda **new_cpu_pda;
+	unsigned long size;
+	int cpu;
+
+	size = roundup(sizeof(struct x8664_pda), cache_line_size());
+
+	/* allocate cpu_pda array and pointer table */
+	{
+		unsigned long tsize = nr_cpu_ids * sizeof(void *);
+		unsigned long asize = size * (nr_cpu_ids - 1);
+
+		tsize = roundup(tsize, cache_line_size());
+		new_cpu_pda = alloc_bootmem(tsize + asize);
+		pda = (char *)new_cpu_pda + tsize;
+	}
+
+	/* initialize pointer table to static pda's */
+	for_each_possible_cpu(cpu) {
+		if (cpu == 0) {
+			/* leave boot cpu pda in place */
+			new_cpu_pda[0] = cpu_pda(0);
+			continue;
+		}
+		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
+		new_cpu_pda[cpu]->in_bootmem = 1;
+		pda += size;
+	}
+
+	/* point to new pointer table */
+	_cpu_pda = new_cpu_pda;
+}
 #endif
 
 /*
@@ -110,46 +154,43 @@ EXPORT_SYMBOL(__per_cpu_offset);
  */
 void __init setup_per_cpu_areas(void)
 {
-	int i, highest_cpu = 0;
-	unsigned long size;
+	ssize_t size = PERCPU_ENOUGH_ROOM;
+	char *ptr;
+	int cpu;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	prefill_possible_map();
+#else
+	nr_cpu_ids = num_processors;
 #endif
 
+	/* Setup cpu_pda map */
+	setup_cpu_pda_map();
+
 	/* Copy section for each CPU (we discard the original) */
 	size = PERCPU_ENOUGH_ROOM;
 	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
 			  size);
 
-	for_each_possible_cpu(i) {
-		char *ptr;
+	for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 		ptr = alloc_bootmem_pages(size);
 #else
-		int node = early_cpu_to_node(i);
+		int node = early_cpu_to_node(cpu);
 		if (!node_online(node) || !NODE_DATA(node)) {
 			ptr = alloc_bootmem_pages(size);
 			printk(KERN_INFO
 			       "cpu %d has no node %d or node-local memory\n",
-				i, node);
+				cpu, node);
 		}
 		else
 			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
 #endif
-		if (!ptr)
-			panic("Cannot allocate cpu data for CPU %d\n", i);
-#ifdef CONFIG_X86_64
-		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-#else
-		__per_cpu_offset[i] = ptr - __per_cpu_start;
-#endif
+		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 
-		highest_cpu = i;
 	}
 
-	nr_cpu_ids = highest_cpu + 1;
 	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
 		NR_CPUS, nr_cpu_ids, nr_node_ids);
 
@@ -199,7 +240,7 @@ void __cpuinit numa_set_node(int cpu, int node)
 {
 	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
 
-	if (node != NUMA_NO_NODE)
+	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
 		cpu_pda(cpu)->nodenumber = node;
 
 	if (cpu_to_node_map)
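
The do_boot_cpu() side mentioned in the commit message (reallocating the pda in node
local memory and freeing the bootmem copy) is not part of this file's diff; it lives in
arch/x86/kernel/smpboot.c. The snippet below is only a rough sketch of that scheme as
described above; the helper name realloc_pda_node_local() and the exact error handling
are assumptions, not the upstream code:

/*
 * Rough sketch of the node-local pda reallocation described in the
 * commit message; NOT the code in this diff (the real logic is in
 * smpboot.c).  Helper name and error handling are assumptions.
 */
static int __cpuinit realloc_pda_node_local(int cpu)
{
	struct x8664_pda *oldpda = cpu_pda(cpu);
	struct x8664_pda *newpda;
	unsigned long size = sizeof(struct x8664_pda);
	int node = cpu_to_node(cpu);

	/* nothing to do if this pda is already a regular allocation */
	if (oldpda && !oldpda->in_bootmem)
		return 0;

	newpda = kmalloc_node(size, GFP_ATOMIC, node);
	if (!newpda)
		return oldpda ? 0 : -ENOMEM;	/* keep the bootmem pda if we have one */

	if (oldpda) {
		memcpy(newpda, oldpda, size);
		/* free the initial pda only while bootmem is still live */
		if (!after_bootmem)
			free_bootmem((unsigned long)oldpda, size);
	}

	newpda->in_bootmem = 0;
	cpu_pda(cpu) = newpda;
	return 0;
}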