author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-07 08:09:57 -0400
committer  Ingo Molnar <mingo@elte.hu>                2011-04-11 06:58:23 -0400
commit     f96225fd51893b6650cffd5427f13f6b1b356488 (patch)
tree       24ce12cef2bf87dbcedb3f413b6301ba6215a531 /kernel/sched.c
parent     7dd04b730749f957c116f363524fd622b05e5141 (diff)
sched: Create persistent sched_domains_tmpmask
Since sched domain creation is fully serialized by the
sched_domains_mutex, we can create a single persistent tmpmask to use
during domain creation.
This removes the need for s_data::send_covered.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.607287405@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
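
The idea generalizes beyond this patch: when every user of a scratch buffer is already serialized by a single lock, the buffer can be allocated once and reused, rather than allocated and freed around each call. Below is a minimal userspace C sketch of that pattern, not the kernel code itself; a pthread mutex and a 64-bit mask stand in for sched_domains_mutex and cpumask_var_t, and every name here (domains_mutex, build_groups, and so on) is illustrative rather than kernel API:

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Lock that serializes all "domain building" work. */
static pthread_mutex_t domains_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Persistent scratch mask; only valid while domains_mutex is held. */
static uint64_t domains_tmpmask;

/*
 * Crude stand-in for lockdep_assert_held(): trylock must fail
 * (return EBUSY) if the mutex is currently locked.
 */
static void assert_domains_mutex_held(void)
{
        assert(pthread_mutex_trylock(&domains_mutex) != 0);
}

/* Uses the shared scratch mask instead of a caller-supplied buffer. */
static void build_groups(uint64_t span)
{
        assert_domains_mutex_held();

        domains_tmpmask = 0;                    /* cpumask_clear() */

        for (int cpu = 0; cpu < 64; cpu++) {
                if (!(span & (1ULL << cpu)))
                        continue;
                if (domains_tmpmask & (1ULL << cpu))
                        continue;               /* already covered */
                /* Model a group spanning this CPU and its sibling. */
                domains_tmpmask |= 0x3ULL << (cpu & ~1);
                printf("cpu %d heads a new group\n", cpu);
        }
}

int main(void)
{
        pthread_mutex_lock(&domains_mutex);
        build_groups(0xfULL);                   /* CPUs 0-3 */
        pthread_mutex_unlock(&domains_mutex);
        return 0;
}

The trylock-based assert is a crude stand-in for lockdep_assert_held(): it only proves that the mutex is locked, not that the caller is the one holding it, which the kernel's annotation checks precisely. The payoff mirrors the patch: build_groups() needs no per-call allocation and no error path for a failed cpumask allocation.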
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5ec685ce516a..fd73e91be089 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6822,7 +6822,6 @@ struct sd_data {
 };
 
 struct s_data {
-        cpumask_var_t send_covered;
         struct sched_domain ** __percpu sd;
         struct sd_data sdd[SD_LV_MAX];
         struct root_domain *rd;
@@ -6832,7 +6831,6 @@ enum s_alloc {
         sa_rootdomain,
         sa_sd,
         sa_sd_storage,
-        sa_send_covered,
         sa_none,
 };
 
@@ -6853,6 +6851,8 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
         return cpu;
 }
 
+static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
+
 /*
  * build_sched_groups takes the cpumask we wish to span, and a pointer
  * to a function which identifies what group (along with sched group) a CPU
@@ -6864,13 +6864,17 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
  * and ->cpu_power to 0.
  */
 static void
-build_sched_groups(struct sched_domain *sd, struct cpumask *covered)
+build_sched_groups(struct sched_domain *sd)
 {
         struct sched_group *first = NULL, *last = NULL;
         struct sd_data *sdd = sd->private;
         const struct cpumask *span = sched_domain_span(sd);
+        struct cpumask *covered;
         int i;
 
+        lockdep_assert_held(&sched_domains_mutex);
+        covered = sched_domains_tmpmask;
+
         cpumask_clear(covered);
 
         for_each_cpu(i, span) {
@@ -7015,8 +7019,6 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
                         free_percpu(d->sdd[i].sd);
                         free_percpu(d->sdd[i].sg);
                 } /* fall through */
-        case sa_send_covered:
-                free_cpumask_var(d->send_covered); /* fall through */
         case sa_none:
                 break;
         }
@@ -7029,8 +7031,6 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 
         memset(d, 0, sizeof(*d));
 
-        if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
-                return sa_none;
         for (i = 0; i < SD_LV_MAX; i++) {
                 d->sdd[i].sd = alloc_percpu(struct sched_domain *);
                 if (!d->sdd[i].sd)
@@ -7219,7 +7219,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
                         if (i != cpumask_first(sched_domain_span(sd)))
                                 continue;
 
-                        build_sched_groups(sd, d.send_covered);
+                        build_sched_groups(sd);
                 }
         }
 
@@ -7896,6 +7896,7 @@ void __init sched_init(void)
 
         /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
         zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+        zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
         zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);