author	Siddha, Suresh B <suresh.b.siddha@intel.com>	2006-10-03 04:14:06 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-03 11:04:06 -0400
commit	a616058b7815aafb2163fc795e02a055b0dbc5e2 (patch)
tree	80a98b76f410fd154195e4ee17d1a47f2e6482f6 /kernel/sched.c
parent	5c1e176781f43bc902a51e5832f789756bff911b (diff)
[PATCH] sched: remove unnecessary sched group allocations
Remove the dynamic sched group allocations for the MC and SMP domains. These
allocations can easily fail on big systems (1024 or so CPUs), and we can live
without them.

[akpm@osdl.org: build fix]
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	104
1 file changed, 38 insertions(+), 66 deletions(-)
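
The gist of the change: the per-domain group arrays that used to be kmalloc'd
at domain-build time (NR_CPUS entries per level) become statically sized
arrays, so the build path has no allocation failure or cleanup to handle for
the MC and physical levels. The sketch below is a simplified userspace
illustration of that trade-off, not the kernel code itself; struct my_group
and the build_groups_* helpers are made-up names for illustration only.

/*
 * Simplified userspace illustration of the pattern change; my_group and the
 * build_groups_* names are hypothetical, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 1024

struct my_group { unsigned long cpu_power; };

/* Before: one NR_CPUS-sized array allocated per domain level at build time.
 * The allocation can fail on very large systems and forces error unwinding. */
static int build_groups_dynamic(struct my_group **out)
{
	struct my_group *groups = calloc(NR_CPUS, sizeof(*groups));

	if (!groups) {
		fprintf(stderr, "cannot allocate group array\n");
		return -1;	/* caller must free everything built so far */
	}
	*out = groups;
	return 0;
}

/* After: the array is statically sized, so there is nothing to allocate,
 * nothing to free, and no failure path for these levels. */
static struct my_group static_groups[NR_CPUS];

static struct my_group *build_groups_static(void)
{
	return static_groups;
}

int main(void)
{
	struct my_group *dyn;

	if (build_groups_dynamic(&dyn) == 0)
		free(dyn);
	printf("static array at %p, no allocation needed\n",
	       (void *)build_groups_static());
	return 0;
}

The memory cost is the same NR_CPUS * sizeof(struct sched_group) per level,
but it is paid unconditionally at build time in static storage rather than at
runtime, where it can fail.
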
diff --git a/kernel/sched.c b/kernel/sched.c
index ddf418810c39..6b956bd9b49a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5489,15 +5489,17 @@ __setup ("isolcpus=", isolated_cpu_setup);
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
  */
-static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
-					int (*group_fn)(int cpu))
+static void
+init_sched_build_groups(struct sched_group groups[], cpumask_t span,
+			const cpumask_t *cpu_map,
+			int (*group_fn)(int cpu, const cpumask_t *cpu_map))
 {
 	struct sched_group *first = NULL, *last = NULL;
 	cpumask_t covered = CPU_MASK_NONE;
 	int i;
 
 	for_each_cpu_mask(i, span) {
-		int group = group_fn(i);
+		int group = group_fn(i, cpu_map);
 		struct sched_group *sg = &groups[group];
 		int j;
 
@@ -5508,7 +5510,7 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
 		sg->cpu_power = 0;
 
 		for_each_cpu_mask(j, span) {
-			if (group_fn(j) != group)
+			if (group_fn(j, cpu_map) != group)
 				continue;
 
 			cpu_set(j, covered);
@@ -6084,7 +6086,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static struct sched_group sched_group_cpus[NR_CPUS];
 
-static int cpu_to_cpu_group(int cpu)
+static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map)
 {
 	return cpu;
 }
@@ -6095,31 +6097,36 @@ static int cpu_to_cpu_group(int cpu)
  */
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct sched_domain, core_domains);
-static struct sched_group *sched_group_core_bycpu[NR_CPUS];
+static struct sched_group sched_group_core[NR_CPUS];
 #endif
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu)
+static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map)
 {
-	return first_cpu(cpu_sibling_map[cpu]);
+	cpumask_t mask = cpu_sibling_map[cpu];
+	cpus_and(mask, mask, *cpu_map);
+	return first_cpu(mask);
 }
 #elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu)
+static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map)
 {
 	return cpu;
 }
 #endif
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group *sched_group_phys_bycpu[NR_CPUS];
+static struct sched_group sched_group_phys[NR_CPUS];
 
-static int cpu_to_phys_group(int cpu)
+static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map)
 {
 #ifdef CONFIG_SCHED_MC
 	cpumask_t mask = cpu_coregroup_map(cpu);
+	cpus_and(mask, mask, *cpu_map);
 	return first_cpu(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	return first_cpu(cpu_sibling_map[cpu]);
+	cpumask_t mask = cpu_sibling_map[cpu];
+	cpus_and(mask, mask, *cpu_map);
+	return first_cpu(mask);
 #else
 	return cpu;
 #endif
@@ -6137,7 +6144,7 @@ static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
 static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS];
 
-static int cpu_to_allnodes_group(int cpu)
+static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map)
 {
 	return cpu_to_node(cpu);
 }
@@ -6169,12 +6176,11 @@ next_sg:
 }
 #endif
 
+#ifdef CONFIG_NUMA
 /* Free memory allocated for various sched_group structures */
 static void free_sched_groups(const cpumask_t *cpu_map)
 {
-	int cpu;
-#ifdef CONFIG_NUMA
-	int i;
+	int cpu, i;
 
 	for_each_cpu_mask(cpu, *cpu_map) {
 		struct sched_group *sched_group_allnodes
@@ -6211,20 +6217,12 @@ next_sg:
 		kfree(sched_group_nodes);
 		sched_group_nodes_bycpu[cpu] = NULL;
 	}
-#endif
-	for_each_cpu_mask(cpu, *cpu_map) {
-		if (sched_group_phys_bycpu[cpu]) {
-			kfree(sched_group_phys_bycpu[cpu]);
-			sched_group_phys_bycpu[cpu] = NULL;
-		}
-#ifdef CONFIG_SCHED_MC
-		if (sched_group_core_bycpu[cpu]) {
-			kfree(sched_group_core_bycpu[cpu]);
-			sched_group_core_bycpu[cpu] = NULL;
-		}
-#endif
-	}
 }
+#else
+static void free_sched_groups(const cpumask_t *cpu_map)
+{
+}
+#endif
 
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
@@ -6233,10 +6231,6 @@ next_sg:
 static int build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
-	struct sched_group *sched_group_phys = NULL;
-#ifdef CONFIG_SCHED_MC
-	struct sched_group *sched_group_core = NULL;
-#endif
 #ifdef CONFIG_NUMA
 	struct sched_group **sched_group_nodes = NULL;
 	struct sched_group *sched_group_allnodes = NULL;
@@ -6282,7 +6276,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			sd = &per_cpu(allnodes_domains, i);
 			*sd = SD_ALLNODES_INIT;
 			sd->span = *cpu_map;
-			group = cpu_to_allnodes_group(i);
+			group = cpu_to_allnodes_group(i, cpu_map);
 			sd->groups = &sched_group_allnodes[group];
 			p = sd;
 		} else
@@ -6295,42 +6289,18 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
-		if (!sched_group_phys) {
-			sched_group_phys
-				= kmalloc(sizeof(struct sched_group) * NR_CPUS,
-					  GFP_KERNEL);
-			if (!sched_group_phys) {
-				printk (KERN_WARNING "Can not alloc phys sched"
-						     "group\n");
-				goto error;
-			}
-			sched_group_phys_bycpu[i] = sched_group_phys;
-		}
-
 		p = sd;
 		sd = &per_cpu(phys_domains, i);
-		group = cpu_to_phys_group(i);
+		group = cpu_to_phys_group(i, cpu_map);
 		*sd = SD_CPU_INIT;
 		sd->span = nodemask;
 		sd->parent = p;
 		sd->groups = &sched_group_phys[group];
 
 #ifdef CONFIG_SCHED_MC
-		if (!sched_group_core) {
-			sched_group_core
-				= kmalloc(sizeof(struct sched_group) * NR_CPUS,
-					  GFP_KERNEL);
-			if (!sched_group_core) {
-				printk (KERN_WARNING "Can not alloc core sched"
-						     "group\n");
-				goto error;
-			}
-			sched_group_core_bycpu[i] = sched_group_core;
-		}
-
 		p = sd;
 		sd = &per_cpu(core_domains, i);
-		group = cpu_to_core_group(i);
+		group = cpu_to_core_group(i, cpu_map);
 		*sd = SD_MC_INIT;
 		sd->span = cpu_coregroup_map(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
@@ -6341,7 +6311,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 #ifdef CONFIG_SCHED_SMT
 		p = sd;
 		sd = &per_cpu(cpu_domains, i);
-		group = cpu_to_cpu_group(i);
+		group = cpu_to_cpu_group(i, cpu_map);
 		*sd = SD_SIBLING_INIT;
 		sd->span = cpu_sibling_map[i];
 		cpus_and(sd->span, sd->span, *cpu_map);
@@ -6359,7 +6329,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			continue;
 
 		init_sched_build_groups(sched_group_cpus, this_sibling_map,
-						&cpu_to_cpu_group);
+					cpu_map, &cpu_to_cpu_group);
 	}
 #endif
 
@@ -6371,7 +6341,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		if (i != first_cpu(this_core_map))
 			continue;
 		init_sched_build_groups(sched_group_core, this_core_map,
-						&cpu_to_core_group);
+					cpu_map, &cpu_to_core_group);
 	}
 #endif
 
@@ -6385,14 +6355,14 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			continue;
 
 		init_sched_build_groups(sched_group_phys, nodemask,
-						&cpu_to_phys_group);
+					cpu_map, &cpu_to_phys_group);
 	}
 
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
 	if (sched_group_allnodes)
 		init_sched_build_groups(sched_group_allnodes, *cpu_map,
-					&cpu_to_allnodes_group);
+					cpu_map, &cpu_to_allnodes_group);
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		/* Set up node groups */
@@ -6537,7 +6507,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sched_group_allnodes) {
-		int group = cpu_to_allnodes_group(first_cpu(*cpu_map));
+		int group = cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map);
 		struct sched_group *sg = &sched_group_allnodes[group];
 
 		init_numa_sched_groups_power(sg);
@@ -6563,9 +6533,11 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 
 	return 0;
 
+#ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map);
 	return -ENOMEM;
+#endif
 }
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
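
A second thread running through the diff: every cpu_to_*_group() helper now
takes the cpu_map being built and intersects its topology mask with it (the
added cpus_and(mask, mask, *cpu_map) lines), so the group index never refers
to a CPU outside the current partition. The sketch below models that with a
plain 64-bit bitmask in userspace; cpumask64_t and first_cpu64() are stand-ins
for the kernel's cpumask API, not real kernel functions.

/* Userspace model of the group_fn change: restrict the topology mask
 * (e.g. the sibling map) to the cpu_map before picking the group. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask64_t;		/* one bit per CPU, 64 CPUs max here */

static int first_cpu64(cpumask64_t m)
{
	for (int cpu = 0; cpu < 64; cpu++)
		if (m & ((cpumask64_t)1 << cpu))
			return cpu;
	return -1;			/* empty mask */
}

/* Old behaviour: group chosen from the raw sibling map. */
static int cpu_to_group_old(cpumask64_t sibling_map)
{
	return first_cpu64(sibling_map);
}

/* New behaviour: intersect the sibling map with the cpu_map first. */
static int cpu_to_group_new(cpumask64_t sibling_map, cpumask64_t cpu_map)
{
	return first_cpu64(sibling_map & cpu_map);
}

int main(void)
{
	cpumask64_t siblings = 0x3;	/* CPUs 0 and 1 are siblings */
	cpumask64_t cpu_map  = 0x2;	/* but only CPU 1 is in this partition */

	printf("old group: %d\n", cpu_to_group_old(siblings));		/* 0 */
	printf("new group: %d\n", cpu_to_group_new(siblings, cpu_map));	/* 1 */
	return 0;
}
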