Diffstat (limited to 'arch/ia64/mm/discontig.c')
-rw-r--r--	arch/ia64/mm/discontig.c	72
1 file changed, 49 insertions(+), 23 deletions(-)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 54136fd00202..b5c90e548195 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -126,6 +126,33 @@ static unsigned long __init compute_pernodesize(int node)
 }
 
 /**
+ * per_cpu_node_setup - setup per-cpu areas on each node
+ * @cpu_data: per-cpu area on this node
+ * @node: node to setup
+ *
+ * Copy the static per-cpu data into the region we just set aside and then
+ * setup __per_cpu_offset for each CPU on this node. Return a pointer to
+ * the end of the area.
+ */
+static void *per_cpu_node_setup(void *cpu_data, int node)
+{
+#ifdef CONFIG_SMP
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (node == node_cpuid[cpu].nid) {
+			memcpy(__va(cpu_data), __phys_per_cpu_start,
+			       __per_cpu_end - __per_cpu_start);
+			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
+				__per_cpu_start;
+			cpu_data += PERCPU_PAGE_SIZE;
+		}
+	}
+#endif
+	return cpu_data;
+}
+
+/**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
  * @pernode: physical address of pernode data
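
The helper added above is the core of the patch: every CPU on a node gets a
private, PERCPU_PAGE_SIZE-sized copy of the static per-cpu image, and
__per_cpu_offset[cpu] records the distance from the linked section to that
copy, so a per-cpu variable is reached by adding the CPU's offset to the
variable's link-time address. A minimal user-space C sketch of the same
arithmetic (NCPUS, section, copies and offset are illustrative stand-ins,
not kernel symbols):

    #include <stdio.h>
    #include <string.h>

    #define NCPUS 4                         /* stand-in for NR_CPUS */

    /* Stand-in for the linked per-cpu section,
     * [__per_cpu_start, __per_cpu_end). */
    static char section[64];

    /* One private copy per CPU, like the chunks per_cpu_node_setup()
     * carves out of the node's pernode area in PERCPU_PAGE_SIZE steps. */
    static char copies[NCPUS][sizeof(section)];
    static long offset[NCPUS];              /* role of __per_cpu_offset[] */

    int main(void)
    {
        int *var = (int *)section;  /* a "per-cpu" variable in the template */
        int cpu;

        *var = 42;                  /* value baked into the template image */

        for (cpu = 0; cpu < NCPUS; cpu++) {
            /* Copy the template, then record the distance to this CPU's
             * copy -- the same two steps the helper performs per CPU. */
            memcpy(copies[cpu], section, sizeof(section));
            offset[cpu] = copies[cpu] - section;
        }

        /* Reaching "var" for a given CPU is then plain pointer + offset. */
        for (cpu = 0; cpu < NCPUS; cpu++) {
            int *v = (int *)((char *)var + offset[cpu]);
            printf("cpu %d sees %d at %p\n", cpu, *v, (void *)v);
        }
        return 0;
    }
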
@@ -135,7 +162,7 @@ static void __init fill_pernode(int node, unsigned long pernode,
 				   unsigned long pernodesize)
 {
 	void *cpu_data;
-	int cpus = early_nr_cpus_node(node), cpu;
+	int cpus = early_nr_cpus_node(node);
 	struct bootmem_data *bdp = &mem_data[node].bootmem_data;
 
 	mem_data[node].pernode_addr = pernode;
@@ -155,23 +182,11 @@ static void __init fill_pernode(int node, unsigned long pernode,
 	mem_data[node].pgdat->bdata = bdp;
 	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
-	/*
-	 * Copy the static per-cpu data into the region we
-	 * just set aside and then setup __per_cpu_offset
-	 * for each CPU on this node.
-	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
-	}
+	cpu_data = per_cpu_node_setup(cpu_data, node);
 
 	return;
 }
+
 /**
  * find_pernode_space - allocate memory for memory map and per-node structures
  * @start: physical start of range
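
In fill_pernode() above, the pernode cursor is advanced with
L1_CACHE_ALIGN() so that consecutive per-node structures never share a
cache line. A self-contained sketch of that align-up idiom, assuming a
128-byte line size (the kernel takes the real value from L1_CACHE_BYTES):

    #include <stdio.h>

    /* Round x up to the next multiple of the power-of-two line size;
     * mirrors L1_CACHE_ALIGN(). The 128-byte size is an assumption. */
    #define LINE_BYTES 128UL
    #define LINE_ALIGN(x) (((x) + LINE_BYTES - 1) & ~(LINE_BYTES - 1))

    int main(void)
    {
        unsigned long pernode = 0;

        /* Carve three objects the way fill_pernode() walks its cursor. */
        pernode += LINE_ALIGN(200UL);   /* cursor -> 256 */
        pernode += LINE_ALIGN(128UL);   /* cursor -> 384 */
        pernode += LINE_ALIGN(1UL);     /* cursor -> 512 */
        printf("cursor ends at %lu\n", pernode);
        return 0;
    }
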
@@ -300,8 +315,8 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-	int cpu, node;
 	pg_data_t *pgdat_list[MAX_NUMNODES];
+	int cpu, node;
 
 	for_each_online_node(node)
 		pgdat_list[node] = mem_data[node].pgdat;
@@ -311,12 +326,22 @@ static void __init initialize_pernode_data(void)
 		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
 		       sizeof(pgdat_list));
 	}
-
+#ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		node = node_cpuid[cpu].nid;
 		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
 	}
+#else
+	{
+		struct cpuinfo_ia64 *cpu0_cpu_info;
+		cpu = 0;
+		node = node_cpuid[cpu].nid;
+		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
+			((char *)&per_cpu__cpu_info - __per_cpu_start));
+		cpu0_cpu_info->node_data = mem_data[node].node_data;
+	}
+#endif /* CONFIG_SMP */
 }
 
 /**
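
The new !CONFIG_SMP branch cannot go through per_cpu(), so it locates
cpu 0's cpu_info by hand: the variable's offset within the linked per-cpu
section ((char *)&per_cpu__cpu_info - __per_cpu_start) added to the load
address of the original image (__phys_per_cpu_start). A user-space sketch
of that relocation arithmetic; the struct, the section array and the
32-byte placement are invented for illustration:

    #include <stdio.h>
    #include <string.h>

    struct cpuinfo { int node; };   /* toy stand-in for cpuinfo_ia64 */

    /* Pretend this is the linked per-cpu section, with our variable
     * placed 32 bytes into it (both assumptions for the sketch). */
    static char section[256];

    int main(void)
    {
        struct cpuinfo *linked = (struct cpuinfo *)(section + 32);
        static char image[256];     /* where the "loader" put the data */

        memcpy(image, section, sizeof(section));

        /* image base + (linked address - section start) finds the same
         * object in the loaded image -- what the UP branch does with
         * __phys_per_cpu_start, &per_cpu__cpu_info, __per_cpu_start. */
        struct cpuinfo *real = (struct cpuinfo *)
                (image + ((char *)linked - section));
        real->node = 0;
        printf("cpu 0 cpuinfo at %p, node %d\n", (void *)real, real->node);
        return 0;
    }
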
@@ -461,6 +486,7 @@ void __init find_memory(void)
 	find_initrd();
 }
 
+#ifdef CONFIG_SMP
 /**
  * per_cpu_init - setup per-cpu variables
  *
@@ -471,15 +497,15 @@ void *per_cpu_init(void)
 {
 	int cpu;
 
-	if (smp_processor_id() == 0) {
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			per_cpu(local_per_cpu_offset, cpu) =
-				__per_cpu_offset[cpu];
-		}
-	}
+	if (smp_processor_id() != 0)
+		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
+#endif /* CONFIG_SMP */
 
 /**
  * show_mem - give short summary of memory stats
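
The rewritten per_cpu_init() replaces the nested if/for with an early
return: every CPU other than the boot CPU bails out at once with its own
per-cpu base, and only CPU 0 publishes __per_cpu_offset[] into each CPU's
local_per_cpu_offset. A small sketch of that control flow under assumed
names (cpu_setup, offsets, local_offset and the 4096-byte stride are all
illustrative):

    #include <stdio.h>

    #define NCPUS 4                  /* stand-in for NR_CPUS */

    static long offsets[NCPUS];      /* like __per_cpu_offset[] */
    static long local_offset[NCPUS]; /* like per-cpu local_per_cpu_offset */

    /* Non-boot callers take the early return; only CPU 0 publishes the
     * offsets, and every caller gets its own base back. */
    static long cpu_setup(int self)
    {
        int cpu;

        if (self != 0)
            return offsets[self];

        for (cpu = 0; cpu < NCPUS; cpu++)
            local_offset[cpu] = offsets[cpu];

        return offsets[self];
    }

    int main(void)
    {
        int cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)
            offsets[cpu] = (long)cpu * 4096;  /* assumed page stride */

        for (cpu = 0; cpu < NCPUS; cpu++)
            printf("cpu %d base offset %ld\n", cpu, cpu_setup(cpu));
        return 0;
    }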