aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2011-04-07 08:10:02 -0400
committerIngo Molnar <mingo@elte.hu>2011-04-11 08:09:29 -0400
commitd069b916f7b50021d41d6ce498f86da32a7afaec (patch)
treea8ebe91930f393edf737d2c08540a1dd218c94d6 /kernel
parent2c402dc3bb502e9dd74fce72c14d293fcef4719d (diff)
sched: Reverse the topology list
In order to get rid of static sched_domain::level assignments, reverse
the topology iteration.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.876506131@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c34
1 files changed, 20 insertions, 14 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 00d1e37b4596..38bc53b576a7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7114,20 +7114,23 @@ static const struct cpumask *cpu_smt_mask(int cpu)
 }
 #endif
 
+/*
+ * Topology list, bottom-up.
+ */
 static struct sched_domain_topology_level default_topology[] = {
-#ifdef CONFIG_NUMA
-	{ sd_init_ALLNODES, cpu_allnodes_mask, },
-	{ sd_init_NODE, cpu_node_mask, },
-#endif
-	{ sd_init_CPU, cpu_cpu_mask, },
-#ifdef CONFIG_SCHED_BOOK
-	{ sd_init_BOOK, cpu_book_mask, },
+#ifdef CONFIG_SCHED_SMT
+	{ sd_init_SIBLING, cpu_smt_mask, },
 #endif
 #ifdef CONFIG_SCHED_MC
 	{ sd_init_MC, cpu_coregroup_mask, },
 #endif
-#ifdef CONFIG_SCHED_SMT
-	{ sd_init_SIBLING, cpu_smt_mask, },
+#ifdef CONFIG_SCHED_BOOK
+	{ sd_init_BOOK, cpu_book_mask, },
+#endif
+	{ sd_init_CPU, cpu_cpu_mask, },
+#ifdef CONFIG_NUMA
+	{ sd_init_NODE, cpu_node_mask, },
+	{ sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
 	{ NULL, },
 };
@@ -7136,18 +7139,18 @@ static struct sched_domain_topology_level *sched_domain_topology = default_topol
 
 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		struct s_data *d, const struct cpumask *cpu_map,
-		struct sched_domain_attr *attr, struct sched_domain *parent,
+		struct sched_domain_attr *attr, struct sched_domain *child,
 		int cpu)
 {
 	struct sched_domain *sd = tl->init(d, cpu);
 	if (!sd)
-		return parent;
+		return child;
 
 	set_domain_attribute(sd, attr);
 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
-	sd->parent = parent;
-	if (parent)
-		parent->child = sd;
+	if (child)
+		child->parent = sd;
+	sd->child = child;
 
 	return sd;
 }
@@ -7176,6 +7179,9 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		for (tl = sched_domain_topology; tl->init; tl++)
 			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
 
+		while (sd->child)
+			sd = sd->child;
+
 		*per_cpu_ptr(d.sd, i) = sd;
 	}
 