Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d16c8d9fbd8b..591d5e7f757a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6774,9 +6774,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6970,7 +6970,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -7163,7 +7163,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7302,7 +7302,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7326,7 +7326,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7365,9 +7365,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7420,7 +7420,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {
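
Every hunk above changes the loop bound from the compile-time constant MAX_NUMNODES to the runtime variable nr_node_ids, so the node-setup loops stop at the highest possible node id on the running system instead of walking the full configured maximum. Below is a minimal userspace sketch of that pattern; CONFIG_MAX_NODES and runtime_node_count are illustrative stand-ins for MAX_NUMNODES and nr_node_ids, not kernel symbols.

#include <stdio.h>

#define CONFIG_MAX_NODES 1024		/* stand-in for MAX_NUMNODES: fixed at build time */

static int runtime_node_count = 4;	/* stand-in for nr_node_ids: known only at run time */

int main(void)
{
	int slots = 0;
	int i;

	/* Old pattern: walk the whole compile-time range, mostly over
	 * node ids that cannot exist on this machine. */
	for (i = 0; i < CONFIG_MAX_NODES; i++)
		slots++;
	printf("MAX-bound loop visited %d slots\n", slots);

	/* New pattern: stop at the runtime count of possible node ids. */
	for (i = 0; i < runtime_node_count; i++)
		printf("setting up groups for node %d\n", i);

	return 0;
}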