author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-07 08:09:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-11 06:58:22 -0400
commit		bf28b253266ebd73c331dde24d64606afde32ceb (patch)
tree		7378b3c9fd37ecfdca30fa074f706624f8807f8e
parent		3bd65a80affb9768b91f03c56dba46ee79525f9b (diff)
sched: Remove nodemask allocation
There's only one nodemask user left so remove it with a direct
computation and save some memory and reduce some code-flow complexity.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.505608966@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
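The shape of the change: instead of allocating a scratch cpumask in the domain
builder's state struct, filling it once per CPU with the intersection of that
CPU's node mask and the cpu_map, and freeing it on every unwind path, the
intersection is now computed directly at the single place it is consumed.
Below is a minimal, standalone C sketch of that refactoring pattern; it uses
plain uint64_t bitmasks in place of cpumask_var_t, and the function names and
toy topology are illustrative only, not kernel API.

#include <stdint.h>
#include <stdio.h>

static int cpu_to_node_id(int cpu)
{
	return cpu / 4;		/* toy topology: 4 CPUs per node */
}

static uint64_t node_mask(int node)
{
	return 0xFULL << (node * 4);	/* CPUs belonging to this node */
}

/*
 * The refactored pattern: compute the per-CPU node span where it is
 * consumed, rather than caching it in a builder-state field that needs
 * its own allocation and its own error-unwind step.
 */
static uint64_t cpu_domain_span(int cpu, uint64_t cpu_map)
{
	return node_mask(cpu_to_node_id(cpu)) & cpu_map;
}

int main(void)
{
	uint64_t cpu_map = 0x3F;	/* CPUs 0-5 "online" */
	int cpu;

	for (cpu = 0; cpu < 6; cpu++) {
		if (!(cpu_map & (1ULL << cpu)))
			continue;
		printf("cpu %d: span 0x%llx\n",
		       cpu, (unsigned long long)cpu_domain_span(cpu, cpu_map));
	}
	return 0;
}

The trade-off mirrors the commit message: the intersection is recomputed per
use instead of cached, but the state struct loses a field, one allocation, and
one unwind case.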
 kernel/sched.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d395fe5493c9..f4d3a624c50a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6838,7 +6838,6 @@ struct sd_data {
 };
 
 struct s_data {
-	cpumask_var_t		nodemask;
 	cpumask_var_t		send_covered;
 	struct sched_domain ** __percpu sd;
 	struct sd_data		sdd[SD_LV_MAX];
@@ -6850,7 +6849,6 @@ enum s_alloc {
 	sa_sd,
 	sa_sd_storage,
 	sa_send_covered,
-	sa_nodemask,
 	sa_none,
 };
 
@@ -7035,8 +7033,6 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 	} /* fall through */
 	case sa_send_covered:
 		free_cpumask_var(d->send_covered); /* fall through */
-	case sa_nodemask:
-		free_cpumask_var(d->nodemask); /* fall through */
 	case sa_none:
 		break;
 	}
@@ -7049,10 +7045,8 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 
 	memset(d, 0, sizeof(*d));
 
-	if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
-		return sa_none;
 	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
-		return sa_nodemask;
+		return sa_none;
 	for (i = 0; i < SD_LV_MAX; i++) {
 		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
 		if (!d->sdd[i].sd)
@@ -7149,7 +7143,8 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
 	struct sched_domain *sd;
 	sd = sd_init_CPU(d, i);
 	set_domain_attribute(sd, attr);
-	cpumask_copy(sched_domain_span(sd), d->nodemask);
+	cpumask_and(sched_domain_span(sd),
+		    cpumask_of_node(cpu_to_node(i)), cpu_map);
 	sd->parent = parent;
 	if (parent)
 		parent->child = sd;
@@ -7219,9 +7214,6 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Set up domains for cpus specified by the cpu_map. */
 	for_each_cpu(i, cpu_map) {
-		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
-			    cpu_map);
-
 		sd = NULL;
 		sd = __build_allnodes_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_node_sched_domain(&d, cpu_map, attr, sd, i);