Diffstat (limited to 'arch/ia64/kernel/domain.c')
-rw-r--r--	arch/ia64/kernel/domain.c	76
1 file changed, 45 insertions(+), 31 deletions(-)
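For orientation: the patch below splits the old arch_init_sched_domains() into build_sched_domains(), which builds and attaches domains for an explicit CPU mask, and a thin arch_init_sched_domains() wrapper that first masks out isolated CPUs. A minimal, kernel-context sketch of how a caller might drive the new cpumask-based entry points is shown here; the function name and the old/new map bookkeeping are illustrative only and are not part of this patch.

/*
 * Hypothetical caller sketch -- not part of this patch.  It only
 * illustrates the shape of the cpumask-based entry points added below.
 */
static void rebuild_domains_sketch(const cpumask_t *old_map,
				   const cpumask_t *new_map)
{
	/* Tear down the per-node group lists built over the previous CPU set. */
	arch_destroy_sched_domains(old_map);

	/*
	 * Build and attach domains for the new set; isolated CPUs are
	 * filtered out inside arch_init_sched_domains() itself.
	 */
	arch_init_sched_domains(new_map);
}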
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
index afbde79c3b3d..d65e87b6394f 100644
--- a/arch/ia64/kernel/domain.c
+++ b/arch/ia64/kernel/domain.c
@@ -27,7 +27,7 @@
  *
  * Should use nodemask_t.
  */
-static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, unsigned long *used_nodes)
 {
 	int i, n, val, min_val, best_node = 0;
 
@@ -66,7 +66,7 @@ static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
-static cpumask_t __devinit sched_domain_node_span(int node)
+static cpumask_t sched_domain_node_span(int node)
 {
 	int i;
 	cpumask_t span, nodemask;
@@ -96,7 +96,7 @@ static cpumask_t __devinit sched_domain_node_span(int node)
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static struct sched_group sched_group_cpus[NR_CPUS];
-static int __devinit cpu_to_cpu_group(int cpu)
+static int cpu_to_cpu_group(int cpu)
 {
 	return cpu;
 }
@@ -104,7 +104,7 @@ static int __devinit cpu_to_cpu_group(int cpu)
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static struct sched_group sched_group_phys[NR_CPUS];
-static int __devinit cpu_to_phys_group(int cpu)
+static int cpu_to_phys_group(int cpu)
 {
 #ifdef CONFIG_SCHED_SMT
 	return first_cpu(cpu_sibling_map[cpu]);
@@ -125,44 +125,36 @@ static struct sched_group *sched_group_nodes[MAX_NUMNODES];
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
 static struct sched_group sched_group_allnodes[MAX_NUMNODES];
 
-static int __devinit cpu_to_allnodes_group(int cpu)
+static int cpu_to_allnodes_group(int cpu)
 {
 	return cpu_to_node(cpu);
 }
 #endif
 
 /*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
  */
-void __devinit arch_init_sched_domains(void)
+void build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
-	cpumask_t cpu_default_map;
 
 	/*
-	 * Setup mask for cpus without special case scheduling requirements.
-	 * For now this just excludes isolated cpus, but could be used to
-	 * exclude other special cases in the future.
+	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	cpus_complement(cpu_default_map, cpu_isolated_map);
-	cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
-
-	/*
-	 * Set up domains. Isolated domains just stay on the dummy domain.
-	 */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		int group;
 		struct sched_domain *sd = NULL, *p;
 		cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (num_online_cpus()
 				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
 			sd = &per_cpu(allnodes_domains, i);
 			*sd = SD_ALLNODES_INIT;
-			sd->span = cpu_default_map;
+			sd->span = *cpu_map;
 			group = cpu_to_allnodes_group(i);
 			sd->groups = &sched_group_allnodes[group];
 			p = sd;
@@ -173,7 +165,7 @@ void __devinit arch_init_sched_domains(void)
 		*sd = SD_NODE_INIT;
 		sd->span = sched_domain_node_span(cpu_to_node(i));
 		sd->parent = p;
-		cpus_and(sd->span, sd->span, cpu_default_map);
+		cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
 		p = sd;
@@ -190,7 +182,7 @@ void __devinit arch_init_sched_domains(void)
 		group = cpu_to_cpu_group(i);
 		*sd = SD_SIBLING_INIT;
 		sd->span = cpu_sibling_map[i];
-		cpus_and(sd->span, sd->span, cpu_default_map);
+		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		sd->groups = &sched_group_cpus[group];
 #endif
@@ -198,9 +190,9 @@ void __devinit arch_init_sched_domains(void)
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		cpumask_t this_sibling_map = cpu_sibling_map[i];
-		cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
+		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
 		if (i != first_cpu(this_sibling_map))
 			continue;
 
@@ -213,7 +205,7 @@ void __devinit arch_init_sched_domains(void)
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		cpumask_t nodemask = node_to_cpumask(i);
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 		if (cpus_empty(nodemask))
 			continue;
 
@@ -222,7 +214,7 @@ void __devinit arch_init_sched_domains(void)
 	}
 
 #ifdef CONFIG_NUMA
-	init_sched_build_groups(sched_group_allnodes, cpu_default_map,
+	init_sched_build_groups(sched_group_allnodes, *cpu_map,
 				&cpu_to_allnodes_group);
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
@@ -233,12 +225,12 @@ void __devinit arch_init_sched_domains(void)
 		cpumask_t covered = CPU_MASK_NONE;
 		int j;
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 		if (cpus_empty(nodemask))
 			continue;
 
 		domainspan = sched_domain_node_span(i);
-		cpus_and(domainspan, domainspan, cpu_default_map);
+		cpus_and(domainspan, domainspan, *cpu_map);
 
 		sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
 		sched_group_nodes[i] = sg;
@@ -266,7 +258,7 @@ void __devinit arch_init_sched_domains(void)
 			int n = (i + j) % MAX_NUMNODES;
 
 			cpus_complement(notcovered, covered);
-			cpus_and(tmp, notcovered, cpu_default_map);
+			cpus_and(tmp, notcovered, *cpu_map);
 			cpus_and(tmp, tmp, domainspan);
 			if (cpus_empty(tmp))
 				break;
@@ -293,7 +285,7 @@ void __devinit arch_init_sched_domains(void)
 #endif
 
 	/* Calculate CPU power for physical packages and nodes */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		int power;
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
@@ -359,13 +351,35 @@ next_sg:
 		cpu_attach_domain(sd, i);
 	}
 }
+/*
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ */
+void arch_init_sched_domains(const cpumask_t *cpu_map)
+{
+	cpumask_t cpu_default_map;
+
+	/*
+	 * Setup mask for cpus without special case scheduling requirements.
+	 * For now this just excludes isolated cpus, but could be used to
+	 * exclude other special cases in the future.
+	 */
+	cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+
+	build_sched_domains(&cpu_default_map);
+}
 
-void __devinit arch_destroy_sched_domains(void)
+void arch_destroy_sched_domains(const cpumask_t *cpu_map)
 {
 #ifdef CONFIG_NUMA
 	int i;
 	for (i = 0; i < MAX_NUMNODES; i++) {
+		cpumask_t nodemask = node_to_cpumask(i);
 		struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+		cpus_and(nodemask, nodemask, *cpu_map);
+		if (cpus_empty(nodemask))
+			continue;
+
 		if (sg == NULL)
 			continue;
 		sg = sg->next;