author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 21:37:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 21:37:44 -0400
commit	26dcce0fabbef75ae426461edf21b5030bad60f3
tree	56c64fa47dc29f7ea5a8fd0cab0459fb0a05a2bc	/kernel/sched.c
parent	d7b6de14a0ef8a376f9d57b867545b47302b7bfb
parent	eb6a12c2428d21a9f3e0f1a50e927d5fd80fc3d0
Merge branch 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (31 commits)
  NR_CPUS: Replace NR_CPUS in speedstep-centrino.c
  cpumask: Provide a generic set of CPUMASK_ALLOC macros, FIXUP
  NR_CPUS: Replace NR_CPUS in cpufreq userspace routines
  NR_CPUS: Replace per_cpu(..., smp_processor_id()) with __get_cpu_var
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genapic_flat_64.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genx2apic_uv_x.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/proc.c
  NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/mcheck/mce_64.c
  cpumask: Optimize cpumask_of_cpu in lib/smp_processor_id.c, fix
  cpumask: Use optimized CPUMASK_ALLOC macros in the centrino_target
  cpumask: Provide a generic set of CPUMASK_ALLOC macros
  cpumask: Optimize cpumask_of_cpu in lib/smp_processor_id.c
  cpumask: Optimize cpumask_of_cpu in kernel/time/tick-common.c
  cpumask: Optimize cpumask_of_cpu in drivers/misc/sgi-xp/xpc_main.c
  cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/ldt.c
  cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/io_apic_64.c
  cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr
  Revert "cpumask: introduce new APIs"
  cpumask: make for_each_cpu_mask a bit smaller
  net: Pass reference to cpumask variable in net/sunrpc/svc.c
  ...

Fix up trivial conflicts in drivers/cpufreq/cpufreq.c manually
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
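The thread running through this branch is that cpumask walks should stop scaling with the compile-time NR_CPUS (now allowed to be 4096) and instead be bounded by nr_cpu_ids, the number of CPUs the booted system can actually have. The toy program below is only a sketch of that idea under assumed values: the mask representation and both iterator macros are local stand-ins for this example, not the kernel's own definitions from include/linux/cpumask.h.

/*
 * Illustrative sketch only -- not the kernel's macros. The stand-in mask
 * and iterators exist to show why stopping at nr_cpu_ids beats scanning
 * all NR_CPUS positions on a machine with few CPUs.
 */
#include <stdio.h>

#define NR_CPUS 4096             /* compile-time maximum, e.g. CONFIG_NR_CPUS=4096 */
static int nr_cpu_ids = 8;       /* CPUs possible on this boot (assumed value) */

static int cpu_in_mask[NR_CPUS]; /* toy cpumask: 1 if the CPU is set */

/* Old-style walk: always inspects NR_CPUS slots, even on a small machine. */
#define for_each_cpu_mask(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) if (cpu_in_mask[cpu])

/* _nr-style walk: stops at nr_cpu_ids, so an 8-CPU box inspects 8 slots, not 4096. */
#define for_each_cpu_mask_nr(cpu) \
	for ((cpu) = 0; (cpu) < nr_cpu_ids; (cpu)++) if (cpu_in_mask[cpu])

int main(void)
{
	int cpu;

	cpu_in_mask[1] = cpu_in_mask[3] = cpu_in_mask[5] = 1;

	for_each_cpu_mask_nr(cpu)
		printf("CPU %d is in the mask\n", cpu);

	return 0;
}

Every hunk in the diff that follows is the mechanical form of that shift: for_each_cpu_mask(i, mask) becomes for_each_cpu_mask_nr(i, mask) with the arguments left untouched.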
diff --git a/kernel/sched.c b/kernel/sched.c
index b1104ea5d255..df80bae68152 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2108,7 +2108,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2150,7 +2150,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3168,7 +3168,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3447,7 +3447,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3989,7 +3989,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6802,7 +6802,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6813,7 +6813,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -7013,7 +7013,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -7038,7 +7038,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7277,7 +7277,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7344,7 +7344,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7361,7 +7361,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7428,7 +7428,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7474,21 +7474,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7508,7 +7508,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7603,7 +7603,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);