author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2011-04-07 08:09:48 -0400
committer  Ingo Molnar <mingo@elte.hu>              2011-04-11 06:58:19 -0400
commit     21d42ccfd6c6c11f96c2acfd32a85cfc33514d3a
tree       49ad88cedc18409b7943bbaa1ef9fa75f8699f34 /kernel/sched.c
parent     1cf51902546d60b8a7a6aba2dd557bd4ba8840ea
sched: Simplify finding the lowest sched_domain
Instead of relying on knowing the build order and the various CONFIG_
flags, simply remember the bottom-most sched_domain when creating the
domain hierarchy.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.134511046@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
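
The shape of the change, for context: before this patch the attach loop had to re-derive the lowest domain level from the CONFIG_SCHED_SMT/MC/BOOK build flags; after it, the build loop records the lowest level in a per-CPU pointer and the attach loop simply reads it back. Below is a minimal userspace sketch of that pattern, not the kernel code itself: the types and names are toy stand-ins, and a plain array replaces alloc_percpu().

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct sched_domain {
	struct sched_domain *parent;	/* next level up, e.g. MC above SMT */
	const char *name;
};

/* Stand-in for alloc_percpu(struct sched_domain *): one slot per CPU
   remembering the bottom-most domain built for that CPU. */
static struct sched_domain *lowest_sd[NR_CPUS];

static struct sched_domain *build_level(const char *name,
                                        struct sched_domain *parent)
{
	struct sched_domain *sd = calloc(1, sizeof(*sd));
	if (!sd)
		exit(1);
	sd->parent = parent;
	sd->name = name;
	return sd;
}

int main(void)
{
	/* Build top-down, as __build_sched_domains() does: each call
	   returns the new, lower level. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct sched_domain *sd = NULL;
		sd = build_level("PHYS", sd);
		sd = build_level("MC", sd);
		sd = build_level("SMT", sd);

		/* The patch's key idea: record the bottom-most level now,
		   instead of re-deriving it from CONFIG_ flags later. */
		lowest_sd[cpu] = sd;
	}

	/* Attach loop: no #ifdef CONFIG_SCHED_SMT/... chain needed. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct sched_domain *sd = lowest_sd[cpu];
		printf("cpu%d attaches at %s", cpu, sd->name);
		for (struct sched_domain *t = sd->parent; t; t = t->parent)
			printf(" -> %s", t->name);
		printf("\n");
	}
	return 0;	/* domain frees omitted for brevity */
}

The win is that only the code building the hierarchy needs to know which levels are configured in; every consumer of the bottom level becomes config-independent.
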
diff --git a/kernel/sched.c b/kernel/sched.c
index e66d24aaf6d1..d6992bfa11eb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6865,11 +6865,13 @@ struct s_data {
 	cpumask_var_t nodemask;
 	cpumask_var_t send_covered;
 	cpumask_var_t tmpmask;
+	struct sched_domain ** __percpu sd;
 	struct root_domain *rd;
 };
 
 enum s_alloc {
 	sa_rootdomain,
+	sa_sd,
 	sa_tmpmask,
 	sa_send_covered,
 	sa_nodemask,
@@ -7104,6 +7106,8 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 	switch (what) {
 	case sa_rootdomain:
 		free_rootdomain(d->rd); /* fall through */
+	case sa_sd:
+		free_percpu(d->sd); /* fall through */
 	case sa_tmpmask:
 		free_cpumask_var(d->tmpmask); /* fall through */
 	case sa_send_covered:
@@ -7124,10 +7128,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 		return sa_nodemask;
 	if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
 		return sa_send_covered;
+	d->sd = alloc_percpu(struct sched_domain *);
+	if (!d->sd) {
+		printk(KERN_WARNING "Cannot alloc per-cpu pointers\n");
+		return sa_tmpmask;
+	}
 	d->rd = alloc_rootdomain();
 	if (!d->rd) {
 		printk(KERN_WARNING "Cannot alloc root domain\n");
-		return sa_tmpmask;
+		return sa_sd;
 	}
 	return sa_rootdomain;
 }
@@ -7316,6 +7325,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
 
+		*per_cpu_ptr(d.sd, i) = sd;
+
 		for (tmp = sd; tmp; tmp = tmp->parent) {
 			tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
 			build_sched_groups(&d, tmp, cpu_map, i);
@@ -7363,15 +7374,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Attach the domains */
 	for_each_cpu(i, cpu_map) {
-#ifdef CONFIG_SCHED_SMT
-		sd = &per_cpu(cpu_domains, i).sd;
-#elif defined(CONFIG_SCHED_MC)
-		sd = &per_cpu(core_domains, i).sd;
-#elif defined(CONFIG_SCHED_BOOK)
-		sd = &per_cpu(book_domains, i).sd;
-#else
-		sd = &per_cpu(phys_domains, i).sd;
-#endif
+		sd = *per_cpu_ptr(d.sd, i);
 		cpu_attach_domain(sd, d.rd, i);
 	}
 
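
The patch also extends the staged-allocation idiom shared by __visit_domain_allocation_hell() and __free_domain_allocs(): each allocation step that fails returns the enum value naming what had already been allocated, and the teardown switch falls through from that stage downward. A compact, self-contained sketch of the idiom follows; all names here (ctx, st_*, alloc_ctx, free_ctx) are hypothetical, with userspace malloc/free standing in for the kernel allocators.

#include <stdlib.h>

/* Stages mirror enum s_alloc: ordered from "everything allocated"
   down to "nothing allocated". */
enum stage {
	st_all,		/* like sa_rootdomain: every step succeeded */
	st_b,		/* like sa_sd: free b and everything before it */
	st_a,		/* like sa_tmpmask: free a only */
	st_none,	/* nothing to free */
};

struct ctx {
	void *a, *b, *c;	/* b plays the role of the new d->sd */
};

/* Teardown mirrors __free_domain_allocs(): switch on how far the
   allocation got and fall through, freeing in reverse order. */
static void free_ctx(struct ctx *ctx, enum stage what)
{
	switch (what) {
	case st_all:
		free(ctx->c);	/* fall through */
	case st_b:
		free(ctx->b);	/* fall through */
	case st_a:
		free(ctx->a);	/* fall through */
	case st_none:
		break;
	}
}

/* Allocation mirrors __visit_domain_allocation_hell(): on failure,
   return the stage naming what the caller must free, i.e. the steps
   that had already succeeded. */
static enum stage alloc_ctx(struct ctx *ctx)
{
	ctx->a = malloc(16);
	if (!ctx->a)
		return st_none;
	ctx->b = malloc(16);	/* the newly inserted step */
	if (!ctx->b)
		return st_a;
	ctx->c = malloc(16);
	if (!ctx->c)
		return st_b;	/* was st_a before b existed */
	return st_all;
}

int main(void)
{
	struct ctx ctx = { 0 };
	enum stage got = alloc_ctx(&ctx);
	/* ... use the context if got == st_all ... */
	free_ctx(&ctx, got);	/* safe whichever stage was reached */
	return got == st_all ? 0 : 1;
}

Inserting a new allocation step thus touches three places, which is exactly the shape of the hunks above: a new enum value (sa_sd), a new fall-through case in the teardown, and an updated failure return in the step that follows it.
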