diff options
author | holt@sgi.com <holt@sgi.com> | 2008-04-03 16:17:13 -0400 |
---|---|---|
committer | Tony Luck <tony.luck@intel.com> | 2008-04-08 16:51:35 -0400 |
commit | 2c6e6db41f01b6b4eb98809350827c9678996698 (patch) | |
tree | 00438344c0ad599c1301db2abe32a4c2ee89b607 /include/asm-ia64/acpi.h | |
parent | 41bd26d67c41e325c6b9e56aadfe9dad8af9a565 (diff) |
[IA64] Minimize per_cpu reservations.
This attached patch significantly shrinks boot memory allocation on ia64.
It does this by not allocating per_cpu areas for cpus that can never
exist.
In the case where acpi does not have any numa node description of the
cpus, I defaulted to assigning the first 32 round-robin on the known
nodes. For the !CONFIG_ACPI case I used for_each_possible_cpu().
Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include/asm-ia64/acpi.h')
-rw-r--r-- | include/asm-ia64/acpi.h | 33 |
1 files changed, 33 insertions, 0 deletions
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h index cd1cc39b5599..fcfad326f4c7 100644 --- a/include/asm-ia64/acpi.h +++ b/include/asm-ia64/acpi.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/numa.h> | 36 | #include <linux/numa.h> |
37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
38 | #include <asm/numa.h> | ||
38 | 39 | ||
39 | #define COMPILER_DEPENDENT_INT64 long | 40 | #define COMPILER_DEPENDENT_INT64 long |
40 | #define COMPILER_DEPENDENT_UINT64 unsigned long | 41 | #define COMPILER_DEPENDENT_UINT64 unsigned long |
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu); | |||
115 | extern void set_cpei_target_cpu(unsigned int cpu); | 116 | extern void set_cpei_target_cpu(unsigned int cpu); |
116 | extern unsigned int get_cpei_target_cpu(void); | 117 | extern unsigned int get_cpei_target_cpu(void); |
117 | extern void prefill_possible_map(void); | 118 | extern void prefill_possible_map(void); |
119 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | ||
118 | extern int additional_cpus; | 120 | extern int additional_cpus; |
121 | #else | ||
122 | #define additional_cpus 0 | ||
123 | #endif | ||
119 | 124 | ||
120 | #ifdef CONFIG_ACPI_NUMA | 125 | #ifdef CONFIG_ACPI_NUMA |
121 | #if MAX_NUMNODES > 256 | 126 | #if MAX_NUMNODES > 256 |
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; | |||
129 | 134 | ||
130 | #define acpi_unlazy_tlb(x) | 135 | #define acpi_unlazy_tlb(x) |
131 | 136 | ||
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)  \
	for_each_cpu_mask((cpu), early_cpu_possible_map)

/*
 * Pad early_cpu_possible_map out to the number of CPUs we must be able
 * to bring up, so per_cpu areas are only reserved for CPUs that can
 * actually exist.  Grows the map to at least min_cpus entries, plus
 * reserve_cpus extra slots, capped at NR_CPUS.  Any newly added CPU that
 * has no firmware-supplied node is dealt a node round-robin across the
 * currently online nodes.
 *
 * NOTE(review): this assumes the CPUs already present in
 * early_cpu_possible_map occupy the dense range 0..weight-1 — confirm
 * against the ACPI parsing code that populates the map.
 */
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
	int known = cpus_weight(early_cpu_possible_map);
	int limit = min(max(known, min_cpus) + reserve_cpus, NR_CPUS);
	int next_nid = 0;	/* round-robin node for CPUs with no nid */
	int cpu;

	for (cpu = known; cpu < limit; cpu++) {
		cpu_set(cpu, early_cpu_possible_map);
		if (node_cpuid[cpu].nid != NUMA_NO_NODE)
			continue;
		node_cpuid[cpu].nid = next_nid;
		next_nid = (next_nid + 1) % num_online_nodes();
	}
}
#endif /* CONFIG_ACPI_NUMA */
164 | |||
132 | #endif /*__KERNEL__*/ | 165 | #endif /*__KERNEL__*/ |
133 | 166 | ||
134 | #endif /*_ASM_ACPI_H*/ | 167 | #endif /*_ASM_ACPI_H*/ |