author    holt@sgi.com <holt@sgi.com>    2008-04-03 16:17:13 -0400
committer Tony Luck <tony.luck@intel.com>    2008-04-08 16:51:35 -0400
commit    2c6e6db41f01b6b4eb98809350827c9678996698 (patch)
tree      00438344c0ad599c1301db2abe32a4c2ee89b607 /arch/ia64/mm/discontig.c
parent    41bd26d67c41e325c6b9e56aadfe9dad8af9a565 (diff)
[IA64] Minimize per_cpu reservations.
This patch significantly shrinks boot memory allocation on ia64. It does
this by not allocating per_cpu areas for cpus that can never exist. In the
case where acpi does not have any numa node description of the cpus, I
defaulted to assigning the first 32 round-robin on the known nodes. For
the !CONFIG_ACPI case I used for_each_possible_cpu().

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
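For illustration, the round-robin fallback described above could look
roughly like the sketch below. This is an assumption-laden sketch, not the
patch itself: the function name assign_possible_cpus, the mask
early_cpu_possible_map, and the use of NUMA_NO_NODE as the "no node info"
sentinel are inferred from the description, not copied from this diff.

	/*
	 * Hypothetical sketch: mark the first max_cpus CPUs as possible at
	 * early boot and round-robin any CPU without ACPI NUMA info across
	 * the known nodes. All identifiers here are assumed.
	 */
	static void __init assign_possible_cpus(int max_cpus)
	{
		int cpu, next_nid = 0;

		for (cpu = 0; cpu < max_cpus; cpu++) {
			cpu_set(cpu, early_cpu_possible_map);
			if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
				/* no SRAT entry: spread over known nodes */
				node_cpuid[cpu].nid = next_nid++;
				if (next_nid >= num_online_nodes())
					next_nid = 0;
			}
		}
	}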
Diffstat (limited to 'arch/ia64/mm/discontig.c')
-rw-r--r--    arch/ia64/mm/discontig.c    12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 06c540a29467..6136a4c6df11 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -104,7 +104,7 @@ static int __meminit early_nr_cpus_node(int node)
 {
 	int cpu, n = 0;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_possible_early_cpu(cpu)
 		if (node == node_cpuid[cpu].nid)
 			n++;
 
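The loop bound change above is the heart of the patch: instead of scanning
all NR_CPUS slots, only CPUs that were marked possible during early ACPI
parsing are visited. A plausible definition of the iterator, assuming it
follows the kernel's usual cpumask helpers of that era (the defining
header is not part of this diff, so this is an assumption):

	/* Assumed definition: visit only CPUs marked possible at early boot. */
	extern cpumask_t early_cpu_possible_map;
	#define for_each_possible_early_cpu(cpu) \
		for_each_cpu_mask((cpu), early_cpu_possible_map)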
@@ -143,7 +143,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 #ifdef CONFIG_SMP
 	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_possible_early_cpu(cpu) {
 		if (node == node_cpuid[cpu].nid) {
 			memcpy(__va(cpu_data), __phys_per_cpu_start,
 			       __per_cpu_end - __per_cpu_start);
@@ -346,7 +346,7 @@ static void __init initialize_pernode_data(void)
 
 #ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
 		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
 	}
@@ -494,13 +494,9 @@ void __cpuinit *per_cpu_init(void)
 	int cpu;
 	static int first_time = 1;
 
-
-	if (smp_processor_id() != 0)
-		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
 	if (first_time) {
 		first_time = 0;
-		for (cpu = 0; cpu < NR_CPUS; cpu++)
+		for_each_possible_early_cpu(cpu)
 			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 	}
 
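Besides switching the loop to the new iterator, this hunk also drops the
early return that sent secondary CPUs straight to their per-cpu offset
before the one-time initialization block. Assuming the function ends with
the usual offset computation (the trailing return is outside the hunk and
therefore an assumption here), per_cpu_init() after the patch would read
roughly:

	void __cpuinit *per_cpu_init(void)
	{
		int cpu;
		static int first_time = 1;

		if (first_time) {
			first_time = 0;
			/* record each possible cpu's offset exactly once */
			for_each_possible_early_cpu(cpu)
				per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
		}

		/* every caller, boot cpu or not, returns its own area */
		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
	}

Since first_time guards the setup loop, letting secondary CPUs fall
through is safe: they see first_time == 0, skip the already-done setup,
and just compute their own offset.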