diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-04-07 08:09:58 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-04-11 08:05:00 -0400 |
commit | 4cb988395da6e16627a8be69729e50cd72ebb23e (patch) | |
tree | 17a655fbac5348de9a70ad8726354fa878bc66cb /kernel/sched.c | |
parent | f96225fd51893b6650cffd5427f13f6b1b356488 (diff) |
sched: Avoid allocations in sched_domain_debug()
Since we're all serialized by sched_domains_mutex we can use
sched_domains_tmpmask and avoid having to do allocations. This means
we can use sched_domain_debug() for cpu_attach_domain() again.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.664347467@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 17 |
1 files changed, 5 insertions, 12 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index fd73e91be089..35fc9959b564 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -6395,6 +6395,8 @@ early_initcall(migration_init); | |||
6395 | 6395 | ||
6396 | #ifdef CONFIG_SMP | 6396 | #ifdef CONFIG_SMP |
6397 | 6397 | ||
6398 | static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ | ||
6399 | |||
6398 | #ifdef CONFIG_SCHED_DEBUG | 6400 | #ifdef CONFIG_SCHED_DEBUG |
6399 | 6401 | ||
6400 | static __read_mostly int sched_domain_debug_enabled; | 6402 | static __read_mostly int sched_domain_debug_enabled; |
@@ -6490,7 +6492,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6490 | 6492 | ||
6491 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6493 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
6492 | { | 6494 | { |
6493 | cpumask_var_t groupmask; | ||
6494 | int level = 0; | 6495 | int level = 0; |
6495 | 6496 | ||
6496 | if (!sched_domain_debug_enabled) | 6497 | if (!sched_domain_debug_enabled) |
@@ -6503,20 +6504,14 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6503 | 6504 | ||
6504 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6505 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
6505 | 6506 | ||
6506 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { | ||
6507 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | ||
6508 | return; | ||
6509 | } | ||
6510 | |||
6511 | for (;;) { | 6507 | for (;;) { |
6512 | if (sched_domain_debug_one(sd, cpu, level, groupmask)) | 6508 | if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) |
6513 | break; | 6509 | break; |
6514 | level++; | 6510 | level++; |
6515 | sd = sd->parent; | 6511 | sd = sd->parent; |
6516 | if (!sd) | 6512 | if (!sd) |
6517 | break; | 6513 | break; |
6518 | } | 6514 | } |
6519 | free_cpumask_var(groupmask); | ||
6520 | } | 6515 | } |
6521 | #else /* !CONFIG_SCHED_DEBUG */ | 6516 | #else /* !CONFIG_SCHED_DEBUG */ |
6522 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6517 | # define sched_domain_debug(sd, cpu) do { } while (0) |
@@ -6721,7 +6716,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6721 | sd->child = NULL; | 6716 | sd->child = NULL; |
6722 | } | 6717 | } |
6723 | 6718 | ||
6724 | /* sched_domain_debug(sd, cpu); */ | 6719 | sched_domain_debug(sd, cpu); |
6725 | 6720 | ||
6726 | rq_attach_root(rq, rd); | 6721 | rq_attach_root(rq, rd); |
6727 | tmp = rq->sd; | 6722 | tmp = rq->sd; |
@@ -6851,8 +6846,6 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) | |||
6851 | return cpu; | 6846 | return cpu; |
6852 | } | 6847 | } |
6853 | 6848 | ||
6854 | static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ | ||
6855 | |||
6856 | /* | 6849 | /* |
6857 | * build_sched_groups takes the cpumask we wish to span, and a pointer | 6850 | * build_sched_groups takes the cpumask we wish to span, and a pointer |
6858 | * to a function which identifies what group(along with sched group) a CPU | 6851 | * to a function which identifies what group(along with sched group) a CPU |
@@ -7896,8 +7889,8 @@ void __init sched_init(void) | |||
7896 | 7889 | ||
7897 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | 7890 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ |
7898 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); | 7891 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
7899 | zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); | ||
7900 | #ifdef CONFIG_SMP | 7892 | #ifdef CONFIG_SMP |
7893 | zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); | ||
7901 | #ifdef CONFIG_NO_HZ | 7894 | #ifdef CONFIG_NO_HZ |
7902 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); | 7895 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); |
7903 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); | 7896 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); |