author		Siddha, Suresh B <suresh.b.siddha@intel.com>	2006-10-03 04:14:08 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-03 11:04:06 -0400
commit		1a84887080dc15f048db7c3a643e98f1435790d6 (patch)
tree		7cd335fee247c0b60f8562c82806b49435b5fb9d
parent		74732646431a1bb7e23e6b564127a8881cfef900 (diff)
[PATCH] sched: introduce child field in sched_domain
Introduce the child field in sched_domain struct and use it in
sched_balance_self().

We will also use this field in cleaning up the sched group cpu_power
setup (done in a different patch) code.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
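For context, a minimal user-space sketch (not part of this patch) of what the two-way linkage enables: each domain now points both up (parent) and down (child), so a walk that rejects one level can simply descend instead of rescanning the hierarchy from the bottom. The toy_domain type, walk_down() helper, and flag values below are illustrative assumptions, not kernel API.

/*
 * Toy user-space model of the parent/child linkage; toy_domain and
 * walk_down are illustrative names, not kernel identifiers.
 */
#include <stdio.h>

struct toy_domain {
	struct toy_domain *parent;	/* NULL at the top level */
	struct toy_domain *child;	/* NULL at the bottom level */
	const char *name;
	int flags;
};

/* Start at the highest domain and descend via the child links. */
static void walk_down(const struct toy_domain *sd, int flag)
{
	while (sd) {
		if (!(sd->flags & flag)) {
			/* This level does not balance on the flag: step down. */
			sd = sd->child;
			continue;
		}
		printf("balancing in %s\n", sd->name);
		sd = sd->child;
	}
}

int main(void)
{
	struct toy_domain node = { .name = "NODE",    .flags = 1 };
	struct toy_domain phys = { .name = "PHYS",    .flags = 1 };
	struct toy_domain sibl = { .name = "SIBLING", .flags = 0 };

	/* Link the levels both ways, as build_sched_domains() now does. */
	node.child = &phys;  phys.parent = &node;
	phys.child = &sibl;  sibl.parent = &phys;

	walk_down(&node, 1);
	return 0;
}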
-rw-r--r--	include/asm-i386/topology.h		1
-rw-r--r--	include/asm-ia64/topology.h		2
-rw-r--r--	include/asm-mips/mach-ip27/topology.h	1
-rw-r--r--	include/asm-powerpc/topology.h		1
-rw-r--r--	include/asm-x86_64/topology.h		1
-rw-r--r--	include/linux/sched.h			1
-rw-r--r--	include/linux/topology.h		3
-rw-r--r--	kernel/sched.c				40
8 files changed, 40 insertions, 10 deletions
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 6adbd9b1ae88..978d09596130 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -74,6 +74,7 @@ static inline int node_to_first_cpu(int node)
 #define SD_NODE_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 8, \
 	.max_interval = 32, \
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 937c21257523..a6e38565ab4c 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -59,6 +59,7 @@ void build_cpu_to_node_map(void);
 #define SD_CPU_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 1, \
 	.max_interval = 4, \
@@ -84,6 +85,7 @@ void build_cpu_to_node_map(void);
 #define SD_NODE_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 8, \
 	.max_interval = 8*(min(num_online_cpus(), 32)), \
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h
index 59d26b52ba32..a13b715fd9ca 100644
--- a/include/asm-mips/mach-ip27/topology.h
+++ b/include/asm-mips/mach-ip27/topology.h
@@ -22,6 +22,7 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
 #define SD_NODE_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 8, \
 	.max_interval = 32, \
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index bbc3844b086f..8f7ee16781a4 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -43,6 +43,7 @@ extern int pcibus_to_node(struct pci_bus *bus);
 #define SD_NODE_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 8, \
 	.max_interval = 32, \
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 6e7a2e976b04..5c8f49280dbc 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -31,6 +31,7 @@ extern int __node_distance(int, int);
 #define SD_NODE_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 8, \
 	.max_interval = 32, \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 38530232d92f..8e26c9069f15 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -644,6 +644,7 @@ struct sched_group {
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
+	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index ec1eca85290a..486bec23f986 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -89,6 +89,7 @@
 #define SD_SIBLING_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 1, \
 	.max_interval = 2, \
@@ -119,6 +120,7 @@
 #define SD_CPU_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 1, \
 	.max_interval = 4, \
@@ -146,6 +148,7 @@
 #define SD_ALLNODES_INIT (struct sched_domain) { \
 	.span = CPU_MASK_NONE, \
 	.parent = NULL, \
+	.child = NULL, \
 	.groups = NULL, \
 	.min_interval = 64, \
 	.max_interval = 64*num_online_cpus(), \
diff --git a/kernel/sched.c b/kernel/sched.c
index 6d7bf55ec33d..0feeacb91497 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1286,21 +1286,29 @@ static int sched_balance_self(int cpu, int flag)
 	while (sd) {
 		cpumask_t span;
 		struct sched_group *group;
-		int new_cpu;
-		int weight;
+		int new_cpu, weight;
+
+		if (!(sd->flags & flag)) {
+			sd = sd->child;
+			continue;
+		}
 
 		span = sd->span;
 		group = find_idlest_group(sd, t, cpu);
-		if (!group)
-			goto nextlevel;
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
 
 		new_cpu = find_idlest_cpu(group, t, cpu);
-		if (new_cpu == -1 || new_cpu == cpu)
-			goto nextlevel;
+		if (new_cpu == -1 || new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
 
-		/* Now try balancing at a lower domain level */
+		/* Now try balancing at a lower domain level of new_cpu */
 		cpu = new_cpu;
-nextlevel:
 		sd = NULL;
 		weight = cpus_weight(span);
 		for_each_domain(cpu, tmp) {
@@ -5448,12 +5456,18 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
-		if (sd_parent_degenerate(tmp, parent))
+		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
+			if (parent->parent)
+				parent->parent->child = tmp;
+		}
 	}
 
-	if (sd && sd_degenerate(sd))
+	if (sd && sd_degenerate(sd)) {
 		sd = sd->parent;
+		if (sd)
+			sd->child = NULL;
+	}
 
 	sched_domain_debug(sd, cpu);
 
@@ -6288,6 +6302,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		*sd = SD_NODE_INIT;
 		sd->span = sched_domain_node_span(cpu_to_node(i));
 		sd->parent = p;
+		if (p)
+			p->child = sd;
 		cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
@@ -6297,6 +6313,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		*sd = SD_CPU_INIT;
 		sd->span = nodemask;
 		sd->parent = p;
+		if (p)
+			p->child = sd;
 		sd->groups = &sched_group_phys[group];
 
 #ifdef CONFIG_SCHED_MC
@@ -6307,6 +6325,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		sd->span = cpu_coregroup_map(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
+		p->child = sd;
 		sd->groups = &sched_group_core[group];
 #endif
 
@@ -6318,6 +6337,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		sd->span = cpu_sibling_map[i];
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
+		p->child = sd;
 		sd->groups = &sched_group_cpus[group];
 #endif
 	}