author    Mike Travis <travis@sgi.com>           2008-05-12 15:21:12 -0400
committer Thomas Gleixner <tglx@linutronix.de>   2008-05-23 12:22:17 -0400
commit    a953e4597abd51b74c99e0e3b7074532a60fd031 (patch)
tree      c5dc78de79b0b9ff910beff4c2fa156c2760adf3 /kernel
parent    75d3bce2fc0a80f435fe12f2c9ed2632c8ac29e4 (diff)
sched: replace MAX_NUMNODES with nr_node_ids in kernel/sched.c
* Replace usages of MAX_NUMNODES with nr_node_ids in kernel/sched.c,
  where appropriate.  This saves some allocated space as well as many
  wasted cycles going through node entries that are non-existent.

For inclusion into sched-devel/latest tree.

Based on:
	git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
    +   sched-devel/latest  .../mingo/linux-2.6-sched-devel.git

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
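The change swaps a compile-time bound for a runtime one: MAX_NUMNODES is the largest number of NUMA nodes the kernel was built to support, while nr_node_ids is the number of node IDs actually possible on the running system. A minimal userspace sketch of the idea follows; the names and values (MAX_NUMNODES_DEMO, nr_node_ids_demo, 512 vs. 4) are hypothetical stand-ins for illustration, not kernel code or figures taken from this patch.

/*
 * Userspace sketch only (assumed stand-in values, not kernel code):
 * bounding the allocation and the loop by the runtime node count
 * instead of the compile-time maximum saves memory and iterations.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES_DEMO 512              /* compile-time upper bound (assumed) */
static unsigned int nr_node_ids_demo = 4;  /* nodes present at runtime (assumed) */

int main(void)
{
	/* Allocation sized by the runtime count: 4 pointers instead of 512. */
	void **group_ptrs = calloc(nr_node_ids_demo, sizeof(*group_ptrs));
	if (!group_ptrs)
		return 1;

	/* Loop bounded by the runtime count: 4 passes, no phantom nodes. */
	for (unsigned int i = 0; i < nr_node_ids_demo; i++)
		printf("visiting node %u\n", i);

	printf("skipped %u non-existent entries (%zu bytes of pointers)\n",
	       MAX_NUMNODES_DEMO - nr_node_ids_demo,
	       (MAX_NUMNODES_DEMO - nr_node_ids_demo) * sizeof(*group_ptrs));

	free(group_ptrs);
	return 0;
}

The kcalloc(nr_node_ids, ...) and for (i = 0; i < nr_node_ids; i++) hunks below apply the same principle inside kernel/sched.c.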
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 18
1 file changed, 9 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index cfa222a91539..1ed8011db826 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6879,9 +6879,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -7075,7 +7075,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
7075 if (!sched_group_nodes) 7075 if (!sched_group_nodes)
7076 continue; 7076 continue;
7077 7077
7078 for (i = 0; i < MAX_NUMNODES; i++) { 7078 for (i = 0; i < nr_node_ids; i++) {
7079 struct sched_group *oldsg, *sg = sched_group_nodes[i]; 7079 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7080 7080
7081 *nodemask = node_to_cpumask(i); 7081 *nodemask = node_to_cpumask(i);
@@ -7263,7 +7263,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7407,7 +7407,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7431,7 +7431,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
				send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7470,9 +7470,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7525,7 +7525,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {