path: root/kernel/sched.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2011-04-07 08:10:01 -0400
committer Ingo Molnar <mingo@elte.hu>  2011-04-11 08:09:27 -0400
commit    2c402dc3bb502e9dd74fce72c14d293fcef4719d (patch)
tree      50b54fa22739e13685bb1e4f54a37551749f4ad4 /kernel/sched.c
parent    eb7a74e6cd936c00749e2921b9e058631d986648 (diff)
sched: Unify the sched_domain build functions
Since all the __build_$DOM_sched_domain() functions do pretty much the
same thing, unify them.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.826347257@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
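The shape the patch lands on is worth seeing in isolation: a NULL-terminated
table of per-level init hooks walked by one generic builder. Below is a
minimal, self-contained sketch of that table-driven pattern, with toy types
and names (struct domain, build_domain, init_smt, etc.) invented for
illustration; it is not the kernel's API, and it omits the per-level mask
function the real patch also adds to each topology level.

#include <stdio.h>

/* Toy stand-ins for the kernel's sched_domain machinery. */
struct domain {
	const char *name;
	struct domain *parent;
	struct domain *child;
};

typedef struct domain *(*domain_init_f)(int cpu);

struct topology_level {
	domain_init_f init;	/* NULL entry terminates the table */
};

static struct domain cpu_dom = { "CPU", NULL, NULL };
static struct domain mc_dom  = { "MC",  NULL, NULL };
static struct domain smt_dom = { "SMT", NULL, NULL };

/* An init hook may return NULL, e.g. when its level is not present. */
static struct domain *init_numa(int cpu) { (void)cpu; return NULL; }
static struct domain *init_cpu(int cpu)  { (void)cpu; return &cpu_dom; }
static struct domain *init_mc(int cpu)   { (void)cpu; return &mc_dom; }
static struct domain *init_smt(int cpu)  { (void)cpu; return &smt_dom; }

/*
 * One generic builder replaces the per-level copies: link the new
 * domain under its parent, or keep the parent when the level is absent.
 */
static struct domain *build_domain(struct topology_level *tl,
				   struct domain *parent, int cpu)
{
	struct domain *d = tl->init(cpu);

	if (!d)
		return parent;

	d->parent = parent;
	if (parent)
		parent->child = d;
	return d;
}

/* Outermost level first, innermost last, mirroring default_topology[]. */
static struct topology_level topology[] = {
	{ init_numa },
	{ init_cpu },
	{ init_mc },
	{ init_smt },
	{ NULL },
};

int main(void)
{
	struct domain *d = NULL;
	struct topology_level *tl;

	for (tl = topology; tl->init; tl++)
		d = build_domain(tl, d, 0);

	/* Walk from the innermost domain up: prints SMT, MC, CPU. */
	for (; d; d = d->parent)
		printf("%s\n", d->name);

	return 0;
}

The payoff is the same as in the patch: adding or removing a topology level
means editing one table entry instead of maintaining another near-identical
build function.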
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  133
1 files changed, 39 insertions, 94 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f0e1821dcb96..00d1e37b4596 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6816,6 +6816,11 @@ static const struct cpumask *cpu_node_mask(int cpu)
 
 	return sched_domains_tmpmask;
 }
+
+static const struct cpumask *cpu_allnodes_mask(int cpu)
+{
+	return cpu_possible_mask;
+}
 #endif /* CONFIG_NUMA */
 
 static const struct cpumask *cpu_cpu_mask(int cpu)
@@ -6843,14 +6848,12 @@ enum s_alloc {
 	sa_none,
 };
 
-typedef struct sched_domain *(*sched_domain_build_f)(struct s_data *d,
-		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-		struct sched_domain *parent, int cpu);
-
+typedef struct sched_domain *(*sched_domain_init_f)(struct s_data *d, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
 struct sched_domain_topology_level {
-	sched_domain_build_f build;
+	sched_domain_init_f init;
+	sched_domain_mask_f mask;
 };
 
 /*
@@ -7104,109 +7107,51 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	}
 }
 
-static struct sched_domain *__build_allnodes_sched_domain(struct s_data *d,
-		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-		struct sched_domain *parent, int i)
+#ifdef CONFIG_SCHED_SMT
+static const struct cpumask *cpu_smt_mask(int cpu)
 {
-	struct sched_domain *sd = NULL;
-#ifdef CONFIG_NUMA
-	sd = sd_init_ALLNODES(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_possible_mask);
-	sd->parent = parent;
-	if (parent)
-		parent->child = sd;
-#endif
-	return sd;
+	return topology_thread_cpumask(cpu);
 }
+#endif
 
-static struct sched_domain *__build_node_sched_domain(struct s_data *d,
-		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-		struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd = NULL;
+static struct sched_domain_topology_level default_topology[] = {
 #ifdef CONFIG_NUMA
-	sd = sd_init_NODE(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_node_mask(i));
-	sd->parent = parent;
-	if (parent)
-		parent->child = sd;
+	{ sd_init_ALLNODES, cpu_allnodes_mask, },
+	{ sd_init_NODE, cpu_node_mask, },
 #endif
-	return sd;
-}
-
-static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
-		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-		struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd;
-	sd = sd_init_CPU(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_cpu_mask(i));
-	sd->parent = parent;
-	if (parent)
-		parent->child = sd;
-	return sd;
-}
-
-static struct sched_domain *__build_book_sched_domain(struct s_data *d,
-		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-		struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd = parent;
+	{ sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_SCHED_BOOK
-	sd = sd_init_BOOK(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
-	sd->parent = parent;
-	parent->child = sd;
+	{ sd_init_BOOK, cpu_book_mask, },
 #endif
-	return sd;
-}
-
-static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
-		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-		struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd = parent;
 #ifdef CONFIG_SCHED_MC
-	sd = sd_init_MC(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
-	sd->parent = parent;
-	parent->child = sd;
+	{ sd_init_MC, cpu_coregroup_mask, },
 #endif
-	return sd;
-}
-
-static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
-		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-		struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd = parent;
 #ifdef CONFIG_SCHED_SMT
-	sd = sd_init_SIBLING(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
-	sd->parent = parent;
-	parent->child = sd;
+	{ sd_init_SIBLING, cpu_smt_mask, },
 #endif
-	return sd;
-}
-
-static struct sched_domain_topology_level default_topology[] = {
-	{ __build_allnodes_sched_domain, },
-	{ __build_node_sched_domain, },
-	{ __build_cpu_sched_domain, },
-	{ __build_book_sched_domain, },
-	{ __build_mc_sched_domain, },
-	{ __build_smt_sched_domain, },
 	{ NULL, },
 };
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+		struct s_data *d, const struct cpumask *cpu_map,
+		struct sched_domain_attr *attr, struct sched_domain *parent,
+		int cpu)
+{
+	struct sched_domain *sd = tl->init(d, cpu);
+	if (!sd)
+		return parent;
+
+	set_domain_attribute(sd, attr);
+	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+	sd->parent = parent;
+	if (parent)
+		parent->child = sd;
+
+	return sd;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -7228,8 +7173,8 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->build; tl++)
-			sd = tl->build(&d, cpu_map, attr, sd, i);
+		for (tl = sched_domain_topology; tl->init; tl++)
+			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
 
 		*per_cpu_ptr(d.sd, i) = sd;
 	}