Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4e2f60335656..8402944f715b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6539,9 +6539,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6735,7 +6735,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -6928,7 +6928,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7067,7 +7067,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7091,7 +7091,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7130,9 +7130,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7185,7 +7185,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {
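
The patch replaces the compile-time bound MAX_NUMNODES with nr_node_ids, the runtime count of possible node ids, so these loops stop at the nodes this machine can actually have rather than walking every slot allowed by the kernel configuration. Below is a minimal userspace sketch of the wrap-around walk used in find_next_best_node(); it is illustrative only, and demo_nr_node_ids and walk_nodes_from() are hypothetical stand-ins for the kernel's nr_node_ids and the real function.

/* Illustrative sketch, not kernel code: visit every possible node id
 * once, starting at @node and wrapping, bounded by the runtime node
 * count instead of the compile-time maximum.  demo_nr_node_ids is a
 * hypothetical stand-in for nr_node_ids. */
#include <stdio.h>

static int demo_nr_node_ids = 4;	/* highest possible node id + 1, known at boot */

static void walk_nodes_from(int node)
{
	int i;

	for (i = 0; i < demo_nr_node_ids; i++) {
		int n = (node + i) % demo_nr_node_ids;	/* start at @node, wrap around */

		printf("visiting node %d\n", n);
	}
}

int main(void)
{
	walk_nodes_from(2);	/* visits 2, 3, 0, 1 */
	return 0;
}

With the old MAX_NUMNODES bound, the same loop would iterate over every configured slot (often hundreds or more) even on a machine with only a handful of possible nodes.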