author     Mike Travis <travis@sgi.com>    2008-05-12 15:21:12 -0400
committer  Ingo Molnar <mingo@elte.hu>     2008-07-08 05:31:30 -0400
commit     076ac2af86c3b7f89ac31bc50a7508d3e035b786
tree       6952562a54dea9a1bef037ca30448f4f4d9bfae8 /kernel
parent     886533a3e370a6d5c4e46819d1e14bd2f20dbb3a
sched, numa: replace MAX_NUMNODES with nr_node_ids in kernel/sched.c
* Replace uses of MAX_NUMNODES with nr_node_ids in kernel/sched.c,
  where appropriate. This saves some allocated space and avoids many
  wasted cycles iterating over node entries that cannot exist, as
  illustrated in the sketch below.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
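
For context, a minimal user-space sketch of the pattern this patch applies. MAX_NUMNODES is a compile-time ceiling (derived from CONFIG_NODES_SHIFT), while nr_node_ids reflects the node IDs that can actually occur on the running system, so sizing loops and allocations by the latter skips entries that can never exist. The program below models the two values with local stand-ins (FAKE_MAX_NUMNODES and fake_nr_node_ids are illustrative names, not kernel symbols); it demonstrates only the loop and allocation change, not the scheduler code itself.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the compile-time ceiling MAX_NUMNODES. */
#define FAKE_MAX_NUMNODES 64

int main(void)
{
	/* Stand-in for nr_node_ids: node IDs actually possible at runtime. */
	int fake_nr_node_ids = 2;

	/* Before the patch: the per-node array is sized by the ceiling ... */
	void **before = calloc(FAKE_MAX_NUMNODES, sizeof(void *));
	/* ... after the patch: only by the node IDs that can exist, mirroring
	 * kcalloc(nr_node_ids, sizeof(struct sched_group *), GFP_KERNEL). */
	void **after = calloc(fake_nr_node_ids, sizeof(void *));

	/* Round-robin walk starting at 'node', as in find_next_best_node():
	 * wrapping modulo the runtime count never lands on a non-existent node. */
	int node = 1, visited = 0;
	for (int i = 0; i < fake_nr_node_ids; i++) {
		int n = (node + i) % fake_nr_node_ids;
		(void)n;
		visited++;
	}

	printf("entries walked: %d instead of %d; bytes per array: %zu instead of %zu\n",
	       visited, FAKE_MAX_NUMNODES,
	       (size_t)fake_nr_node_ids * sizeof(void *),
	       (size_t)FAKE_MAX_NUMNODES * sizeof(void *));

	free(before);
	free(after);
	return 0;
}

On a 2-node machine with a kernel built for 64 possible nodes, that is 2 iterations and 16 bytes per array instead of 64 and 512 (assuming 8-byte pointers), which is the "allocated space" and "wasted cycles" saving the changelog refers to.
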
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 94ead43eda62..bcc22b569ee9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6538,9 +6538,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6734,7 +6734,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -6927,7 +6927,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7066,7 +7066,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7090,7 +7090,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7129,9 +7129,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7184,7 +7184,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {
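
A related point the changelog relies on: nr_node_ids is computed at boot as one past the highest possible node ID, so every node ID the scheduler can encounter stays below it. Below is a stand-alone sketch of that derivation, using a plain 64-bit mask in place of the kernel's node_possible_map; FAKE_MAX_NUMNODES and the mask value are illustrative, and the real computation lives in setup_nr_node_ids() in mm/page_alloc.c, whose exact code may differ.

#include <stdint.h>
#include <stdio.h>

#define FAKE_MAX_NUMNODES 64	/* stand-in for the compile-time ceiling */

int main(void)
{
	/* Stand-in for node_possible_map: only nodes 0 and 1 can exist. */
	uint64_t possible_map = 0x3;

	/* Same idea as the kernel's setup_nr_node_ids():
	 * find the highest possible node ID, then add one. */
	int highest = 0;
	for (int node = 0; node < FAKE_MAX_NUMNODES; node++)
		if (possible_map & (1ULL << node))
			highest = node;

	int nr_node_ids = highest + 1;
	printf("nr_node_ids = %d (compile-time ceiling %d)\n",
	       nr_node_ids, FAKE_MAX_NUMNODES);
	return 0;
}

With this bound in hand, looping to nr_node_ids visits exactly the IDs that can ever be valid, which is why the substitutions in the patch are safe as well as cheaper.
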