author		Mike Travis <travis@sgi.com>	2008-04-15 19:35:52 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 13:44:59 -0400
commit		4bdbaad33d0f4d0e9818a38a825f5b75c0296a28 (patch)
tree		36d8ac15fb3d2d4ccb939327a786f8327a403b98
parent		9d1fe3236a1d64ab687e16b4cbbaa1383352a2c1 (diff)
sched: remove another cpumask_t variable from stack
* Remove another cpumask_t variable from the stack that was missed in the
last kernel/sched.c updates.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/sched.c | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
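For context: cpumask_t is an NR_CPUS-bit bitmap, so on configurations with large NR_CPUS a by-value cpumask_t costs hundreds of bytes of kernel stack per instance. The change below converts sched_domain_node_span() from returning a cpumask_t to filling one through a caller-supplied pointer. A minimal userspace sketch of the same pattern (the NR_CPUS value, the struct layout, and the fill_mask* helper names here are illustrative stand-ins, not the kernel's definitions):

	#include <string.h>

	#define NR_CPUS 4096	/* illustrative config value */

	/* Stand-in for the kernel's cpumask_t: a fixed-size bitmap.
	 * At NR_CPUS=4096 this is 512 bytes -- painful to pass around
	 * by value on a small kernel stack. */
	typedef struct {
		unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
	} cpumask_t;

	/* Before: the result (plus a temporary for the return) occupies
	 * cpumask_t-sized chunks of stack in callee and caller. */
	static cpumask_t fill_mask_by_value(void)
	{
		cpumask_t m;
		memset(&m, 0, sizeof(m));
		return m;
	}

	/* After: the caller passes storage it already owns and the callee
	 * writes through the pointer; no large temporary is created. */
	static void fill_mask(cpumask_t *m)
	{
		memset(m, 0, sizeof(*m));
	}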
diff --git a/kernel/sched.c b/kernel/sched.c
index 6809178eaa9d..b56d98b01267 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6501,27 +6501,24 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
-static cpumask_t sched_domain_node_span(int node)
+static void sched_domain_node_span(int node, cpumask_t *span)
 {
 	nodemask_t used_nodes;
-	cpumask_t span;
 	node_to_cpumask_ptr(nodemask, node);
 	int i;
 
-	cpus_clear(span);
+	cpus_clear(*span);
 	nodes_clear(used_nodes);
 
-	cpus_or(span, span, *nodemask);
+	cpus_or(*span, *span, *nodemask);
 	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
 		int next_node = find_next_best_node(node, &used_nodes);
 
 		node_to_cpumask_ptr_next(nodemask, next_node);
-		cpus_or(span, span, *nodemask);
+		cpus_or(*span, *span, *nodemask);
 	}
-
-	return span;
 }
 #endif
 
@@ -6883,7 +6880,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 
 		sd = &per_cpu(node_domains, i);
 		SD_INIT(sd, NODE);
-		sd->span = sched_domain_node_span(cpu_to_node(i));
+		sched_domain_node_span(cpu_to_node(i), &sd->span);
 		sd->parent = p;
 		if (p)
 			p->child = sd;
@@ -6998,7 +6995,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			continue;
 		}
 
-		*domainspan = sched_domain_node_span(i);
+		sched_domain_node_span(i, domainspan);
 		cpus_and(*domainspan, *domainspan, *cpu_map);
 
 		sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
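At both call sites the caller already owns suitable storage (sd->span in the first hunk, the domainspan pointer in the second), so after the conversion the mask is built in place rather than copied back from a by-value return. In terms of the illustrative helpers sketched above:

	cpumask_t span;			/* storage the caller already owns */
	span = fill_mask_by_value();	/* before: copies a whole cpumask_t back */
	fill_mask(&span);		/* after: written in place, no return copy */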