diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-04-07 08:09:54 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-04-11 06:58:21 -0400 |
commit | 3bd65a80affb9768b91f03c56dba46ee79525f9b (patch) | |
tree | 7c62ee65e2fcd936c35eb6e6196e41b2313a332d /kernel/sched.c | |
parent | 3859173d43658d51a749bc0201b943922577d39c (diff) |
sched: Simplify NODE/ALLNODES domain creation
Don't treat ALLNODES/NODE differently for difference's sake. Simply
always create the ALLNODES domain and let the sd_degenerate() checks
kill it when it's redundant. This simplifies the code flow.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.455464579@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 40 |
1 files changed, 22 insertions, 18 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 72c194c55c31..d395fe5493c9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -6838,9 +6838,6 @@ struct sd_data { | |||
6838 | }; | 6838 | }; |
6839 | 6839 | ||
6840 | struct s_data { | 6840 | struct s_data { |
6841 | #ifdef CONFIG_NUMA | ||
6842 | int sd_allnodes; | ||
6843 | #endif | ||
6844 | cpumask_var_t nodemask; | 6841 | cpumask_var_t nodemask; |
6845 | cpumask_var_t send_covered; | 6842 | cpumask_var_t send_covered; |
6846 | struct sched_domain ** __percpu sd; | 6843 | struct sched_domain ** __percpu sd; |
@@ -7112,30 +7109,35 @@ static void claim_allocations(int cpu, struct sched_domain *sd) | |||
7112 | } | 7109 | } |
7113 | } | 7110 | } |
7114 | 7111 | ||
7115 | static struct sched_domain *__build_numa_sched_domains(struct s_data *d, | 7112 | static struct sched_domain *__build_allnodes_sched_domain(struct s_data *d, |
7116 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i) | 7113 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
7114 | struct sched_domain *parent, int i) | ||
7117 | { | 7115 | { |
7118 | struct sched_domain *sd = NULL; | 7116 | struct sched_domain *sd = NULL; |
7119 | #ifdef CONFIG_NUMA | 7117 | #ifdef CONFIG_NUMA |
7120 | struct sched_domain *parent; | 7118 | sd = sd_init_ALLNODES(d, i); |
7121 | 7119 | set_domain_attribute(sd, attr); | |
7122 | d->sd_allnodes = 0; | 7120 | cpumask_copy(sched_domain_span(sd), cpu_map); |
7123 | if (cpumask_weight(cpu_map) > | 7121 | sd->parent = parent; |
7124 | SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) { | 7122 | if (parent) |
7125 | sd = sd_init_ALLNODES(d, i); | 7123 | parent->child = sd; |
7126 | set_domain_attribute(sd, attr); | 7124 | #endif |
7127 | cpumask_copy(sched_domain_span(sd), cpu_map); | 7125 | return sd; |
7128 | d->sd_allnodes = 1; | 7126 | } |
7129 | } | ||
7130 | parent = sd; | ||
7131 | 7127 | ||
7128 | static struct sched_domain *__build_node_sched_domain(struct s_data *d, | ||
7129 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, | ||
7130 | struct sched_domain *parent, int i) | ||
7131 | { | ||
7132 | struct sched_domain *sd = NULL; | ||
7133 | #ifdef CONFIG_NUMA | ||
7132 | sd = sd_init_NODE(d, i); | 7134 | sd = sd_init_NODE(d, i); |
7133 | set_domain_attribute(sd, attr); | 7135 | set_domain_attribute(sd, attr); |
7134 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); | 7136 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
7137 | cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map); | ||
7135 | sd->parent = parent; | 7138 | sd->parent = parent; |
7136 | if (parent) | 7139 | if (parent) |
7137 | parent->child = sd; | 7140 | parent->child = sd; |
7138 | cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map); | ||
7139 | #endif | 7141 | #endif |
7140 | return sd; | 7142 | return sd; |
7141 | } | 7143 | } |
@@ -7220,7 +7222,9 @@ static int build_sched_domains(const struct cpumask *cpu_map, | |||
7220 | cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), | 7222 | cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), |
7221 | cpu_map); | 7223 | cpu_map); |
7222 | 7224 | ||
7223 | sd = __build_numa_sched_domains(&d, cpu_map, attr, i); | 7225 | sd = NULL; |
7226 | sd = __build_allnodes_sched_domain(&d, cpu_map, attr, sd, i); | ||
7227 | sd = __build_node_sched_domain(&d, cpu_map, attr, sd, i); | ||
7224 | sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); | 7228 | sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); |
7225 | sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); | 7229 | sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); |
7226 | sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); | 7230 | sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); |