Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	54
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 94ead43eda62..e6795e39c8ab 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1939,7 +1939,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 	/* Tally up the load of all CPUs in the group */
 	avg_load = 0;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
 			load = source_load(i, load_idx);
@@ -1981,7 +1981,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2964,7 +2964,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3228,7 +3228,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3759,7 +3759,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6491,7 +6491,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6502,7 +6502,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -6538,9 +6538,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6702,7 +6702,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -6727,14 +6727,14 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -6927,7 +6927,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -6966,7 +6966,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7033,7 +7033,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7050,7 +7050,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7066,7 +7066,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7090,7 +7090,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7117,7 +7117,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7129,9 +7129,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7163,28 +7163,28 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {
@@ -7197,7 +7197,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7292,7 +7292,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);
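
The pattern throughout the patch is uniform: loops bounded by compile-time maxima (NR_CPUS via for_each_cpu_mask, and MAX_NUMNODES) become loops bounded by the runtime counts of ids that can actually exist (for_each_cpu_mask_nr, nr_node_ids). The short userspace sketch below is only an analogy of that idea, not the kernel implementation; MAX_IDS, nr_ids and mask are illustrative placeholders rather than kernel symbols.

/*
 * Minimal sketch: scanning a bitmap up to a build-time maximum versus
 * up to the runtime count of possible ids. Both loops find the same
 * set bits, but the second does far fewer iterations when the machine
 * has many fewer ids than the compile-time limit allows.
 */
#include <stdio.h>

#define MAX_IDS 4096			/* stand-in for NR_CPUS / MAX_NUMNODES */
static int nr_ids = 8;			/* stand-in for nr_cpu_ids / nr_node_ids */
static unsigned long mask = 0xabUL;	/* toy bitmap of online ids */

int main(void)
{
	int i, hits_old = 0, hits_new = 0;

	/* Old style: walk every possible bit up to the compile-time max. */
	for (i = 0; i < MAX_IDS; i++)
		if (i < (int)(8 * sizeof(mask)) && ((mask >> i) & 1))
			hits_old++;

	/* New style: stop at the runtime number of possible ids. */
	for (i = 0; i < nr_ids; i++)
		if ((mask >> i) & 1)
			hits_new++;

	printf("old scan: %d set bits in %d iterations\n", hits_old, MAX_IDS);
	printf("new scan: %d set bits in %d iterations\n", hits_new, nr_ids);
	return 0;
}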