author    Rusty Russell <rusty@rustcorp.com.au>  2008-11-24 11:05:02 -0500
committer Ingo Molnar <mingo@elte.hu>            2008-11-24 11:49:47 -0500
commit    abcd083a1a658d2bc1f7fced02632bfe03918002 (patch)
tree      ba9fe810b1fc62c7f66f9b5fc17c6f3e6bcbd35b /kernel
parent    ea6f18ed5a1531caf678374f30a0990c9e6742f3 (diff)
sched: convert sched.c from for_each_cpu_mask to for_each_cpu.
Impact: trivial API conversion

This is a simple conversion, but note that for_each_cpu() terminates with
i >= nr_cpu_ids, not i == NR_CPUS like for_each_cpu_mask() did.

I don't convert all of them: sd->span changes in a later patch, so change
those iterators there rather than here.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
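The behavioral note above is the loop bound: for_each_cpu() stops once the index
reaches nr_cpu_ids (the runtime number of possible CPU ids), while the old
for_each_cpu_mask() scanned all the way to the compile-time NR_CPUS, so the value
the index holds after the loop differs. A minimal userspace sketch of the two
termination conditions; NR_CPUS_DEMO, nr_cpu_ids_demo and next_set_bit_upto() are
invented stand-ins for illustration, not kernel symbols:

#include <stdio.h>

/* Illustrative stand-ins, not the kernel's definitions. */
#define NR_CPUS_DEMO 64             /* compile-time maximum, like NR_CPUS */
static int nr_cpu_ids_demo = 8;     /* runtime count of possible CPU ids  */

/* Find the next set bit in [start, limit); return limit if none. */
static int next_set_bit_upto(unsigned long mask, int start, int limit)
{
        for (int i = start; i < limit; i++)
                if (mask & (1UL << i))
                        return i;
        return limit;
}

int main(void)
{
        unsigned long mask = 0x13;  /* CPUs 0, 1 and 4 set */
        int i;

        /* Old-style walk: bounded by the compile-time maximum,
         * so the loop ends with i == NR_CPUS_DEMO. */
        for (i = next_set_bit_upto(mask, 0, NR_CPUS_DEMO);
             i < NR_CPUS_DEMO;
             i = next_set_bit_upto(mask, i + 1, NR_CPUS_DEMO))
                printf("old-style visit cpu %d\n", i);
        printf("old-style loop ended with i == %d\n", i);

        /* New-style walk: bounded by the runtime count,
         * so the loop ends with i >= nr_cpu_ids_demo. */
        for (i = next_set_bit_upto(mask, 0, nr_cpu_ids_demo);
             i < nr_cpu_ids_demo;
             i = next_set_bit_upto(mask, i + 1, nr_cpu_ids_demo))
                printf("new-style visit cpu %d\n", i);
        printf("new-style loop ended with i == %d\n", i);

        return 0;
}

Both loops visit CPUs 0, 1 and 4, but the first leaves i at 64 and the second at 8,
which is the i >= nr_cpu_ids versus i == NR_CPUS distinction the changelog warns about.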
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  36
1 file changed, 18 insertions(+), 18 deletions(-)
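A second pattern visible in every hunk below: for_each_cpu() takes a pointer to the
mask rather than the mask itself, so value arguments such as group->cpumask gain an
'&' while arguments that are already pointers, such as cpu_map or tmp, are passed
straight through instead of being dereferenced. A hedged userspace sketch of that
macro shape; demo_mask_t, demo_for_each_cpu() and the other demo_* names are invented
for illustration, not the kernel API:

#include <stdio.h>

/* Invented stand-ins for cpumask_t and nr_cpu_ids, for illustration only. */
typedef struct { unsigned long bits; } demo_mask_t;
static int demo_nr_cpu_ids = 8;

static int demo_next_cpu(int prev, const demo_mask_t *mask)
{
        for (int i = prev + 1; i < demo_nr_cpu_ids; i++)
                if (mask->bits & (1UL << i))
                        return i;
        return demo_nr_cpu_ids;
}

/* Takes a pointer to the mask, like for_each_cpu(cpu, mask) does. */
#define demo_for_each_cpu(cpu, maskp)                           \
        for ((cpu) = demo_next_cpu(-1, (maskp));                \
             (cpu) < demo_nr_cpu_ids;                           \
             (cpu) = demo_next_cpu((cpu), (maskp)))

struct demo_group { demo_mask_t cpumask; };

int main(void)
{
        struct demo_group group = { .cpumask = { 0x16 } };  /* CPUs 1, 2, 4 */
        const demo_mask_t *cpu_map = &group.cpumask;
        int i;

        /* Value member: take its address at the call site. */
        demo_for_each_cpu(i, &group.cpumask)
                printf("via &group.cpumask: cpu %d\n", i);

        /* Already a pointer: pass it straight through. */
        demo_for_each_cpu(i, cpu_map)
                printf("via cpu_map: cpu %d\n", i);

        return 0;
}

Both walks print CPUs 1, 2 and 4; only the way the caller hands the mask to the
iterator changes, which is all the conversion in the hunks below does at each call site.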
diff --git a/kernel/sched.c b/kernel/sched.c
index dd22cec499b8..e59978eead17 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2061,7 +2061,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask_nr(i, group->cpumask) {
+		for_each_cpu(i, &group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2103,7 +2103,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask_nr(i, *tmp) {
+	for_each_cpu(i, tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3121,7 +3121,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask_nr(i, group->cpumask) {
+		for_each_cpu(i, &group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3400,7 +3400,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask_nr(i, group->cpumask) {
+	for_each_cpu(i, &group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3942,7 +3942,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask_nr(balance_cpu, cpus) {
+		for_each_cpu(balance_cpu, &cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6906,7 +6906,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask_nr(i, *span) {
+	for_each_cpu(i, span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6917,7 +6917,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask_nr(j, *span) {
+		for_each_cpu(j, span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -7117,7 +7117,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask_nr(j, sg->cpumask) {
+		for_each_cpu(j, &sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -7142,7 +7142,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask_nr(cpu, *cpu_map) {
+	for_each_cpu(cpu, cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7396,7 +7396,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7463,7 +7463,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7480,7 +7480,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7547,7 +7547,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask_nr(j, *nodemask) {
+		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7593,21 +7593,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7627,7 +7627,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7709,7 +7709,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	cpumask_t tmpmask;
 	int i;
 
-	for_each_cpu_mask_nr(i, *cpu_map)
+	for_each_cpu(i, cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);