author:    Linus Torvalds <torvalds@linux-foundation.org>  2009-01-03 15:04:39 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>  2009-01-03 15:04:39 -0500
commit:    7d3b56ba37a95f1f370f50258ed3954c304c524b
tree:      86102527b92f02450aa245f084ffb491c18d2e0a /kernel/sched.c
parent:    269b012321f2f1f8e4648c43a93bf432b42c6668
parent:    ab14398abd195af91a744c320a52a1bce814dd1e
Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
x86: setup_per_cpu_areas() cleanup
cpumask: fix compile error when CONFIG_NR_CPUS is not defined
cpumask: use alloc_cpumask_var_node where appropriate
cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
x86: use cpumask_var_t in acpi/boot.c
x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
sched: put back some stack hog changes that were undone in kernel/sched.c
x86: enable cpus display of kernel_max and offlined cpus
ia64: cpumask fix for is_affinity_mask_valid()
cpumask: convert RCU implementations, fix
xtensa: define __fls
mn10300: define __fls
m32r: define __fls
h8300: define __fls
frv: define __fls
cris: define __fls
cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
cpumask: zero extra bits in alloc_cpumask_var_node
cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
cpumask: convert mm/
...
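The recurring pattern across these 77 commits is the move away from on-stack cpumask_t values, which cost NR_CPUS/8 bytes apiece (512 bytes at NR_CPUS=4096), toward operations on struct cpumask pointers and heap-backed cpumask_var_t temporaries. A minimal before/after sketch of the idiom; the function names old_style/new_style are invented for illustration and this is not code from the merge itself:

    /* Old style: cpumask_t handled by value -- every temporary and
     * struct return costs a full NR_CPUS-bit copy on the stack. */
    static int old_style(int cpu, int node)
    {
            cpumask_t mask = node_to_cpumask(node);

            cpus_and(mask, mask, cpu_online_map);
            return cpu_isset(cpu, mask);
    }

    /* New style: work through struct cpumask pointers; when a scratch
     * mask is needed, allocate it as a cpumask_var_t instead of
     * declaring it on the stack. */
    static int new_style(int cpu, int node)
    {
            cpumask_var_t mask;
            int ret;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return 0;
            cpumask_and(mask, cpumask_of_node(node), cpu_online_mask);
            ret = cpumask_test_cpu(cpu, mask);
            free_cpumask_var(mask);
            return ret;
    }

When CONFIG_CPUMASK_OFFSTACK is not set, cpumask_var_t degenerates to a plain cpumask_t and alloc_cpumask_var() always succeeds without allocating, so the new idiom costs nothing on small configurations.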
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  53
1 file changed, 15 insertions(+), 38 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 930bf2e6d714..545c6fccd1dc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3715,7 +3715,7 @@ redo:
 	 * don't kick the migration_thread, if the curr
 	 * task on busiest cpu can't be moved to this_cpu
 	 */
-	if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+	if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
 		double_unlock_balance(this_rq, busiest);
 		all_pinned = 1;
 		return ld_moved;
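This first hunk is the simplest case of the conversion: cpu_isset() took the mask as an lvalue, while cpumask_test_cpu() takes a pointer, hence the added '&' on the task's cpus_allowed field. A standalone sketch of the two forms, with an invented function and a hypothetical local mask:

    static int test_both_ways(int this_cpu)
    {
            cpumask_t allowed = CPU_MASK_ALL;	/* hypothetical local mask */

            (void)cpu_isset(this_cpu, allowed);	/* old: mask as lvalue */
            return cpumask_test_cpu(this_cpu, &allowed);	/* new: mask by pointer */
    }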
@@ -6257,9 +6257,7 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
 	int dest_cpu;
-	/* FIXME: Use cpumask_of_node here. */
-	cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));
-	const struct cpumask *nodemask = &_nodemask;
+	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
 	/* Look for allowed, online CPU in same node. */
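Here the win is larger than a rename: node_to_cpumask() returned a cpumask_t by value, so the old code had to materialize a full-width _nodemask on the stack just to take its address. cpumask_of_node() instead returns a const pointer into the kernel's per-node cpumask table, and the temporary disappears entirely. In sketch form, with old_way/new_way as invented names:

    /* Old: the by-value return forces a 512-byte copy at NR_CPUS=4096,
     * even though only a pointer to the mask is wanted. */
    static const struct cpumask *old_way(int nid, cpumask_t *scratch)
    {
            *scratch = node_to_cpumask(nid);
            return scratch;
    }

    /* New: borrow a pointer to the shared per-node mask; no copy. */
    static const struct cpumask *new_way(int nid)
    {
            return cpumask_of_node(nid);
    }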
@@ -7170,21 +7168,18 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 static void sched_domain_node_span(int node, struct cpumask *span)
 {
 	nodemask_t used_nodes;
-	/* FIXME: use cpumask_of_node() */
-	node_to_cpumask_ptr(nodemask, node);
 	int i;
 
-	cpus_clear(*span);
+	cpumask_clear(span);
 	nodes_clear(used_nodes);
 
-	cpus_or(*span, *span, *nodemask);
+	cpumask_or(span, span, cpumask_of_node(node));
 	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
 		int next_node = find_next_best_node(node, &used_nodes);
 
-		node_to_cpumask_ptr_next(nodemask, next_node);
-		cpus_or(*span, *span, *nodemask);
+		cpumask_or(span, span, cpumask_of_node(next_node));
 	}
 }
 #endif /* CONFIG_NUMA */
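For context, sched_domain_node_span() grows a NUMA domain's span greedily: the node's own CPUs first, then the CPUs of the nearest remaining nodes until SD_NODES_PER_DOMAIN nodes are covered. A compressed restatement of the new body with explanatory comments, assuming find_next_best_node() and SD_NODES_PER_DOMAIN as defined elsewhere in kernel/sched.c (node_span_sketch is an invented name):

    static void node_span_sketch(int node, struct cpumask *span)
    {
            nodemask_t used_nodes;
            int i;

            cpumask_clear(span);
            nodes_clear(used_nodes);

            /* seed the span with the node's own CPUs */
            cpumask_or(span, span, cpumask_of_node(node));
            node_set(node, used_nodes);

            /* then fold in the SD_NODES_PER_DOMAIN - 1 nearest nodes */
            for (i = 1; i < SD_NODES_PER_DOMAIN; i++)
                    cpumask_or(span, span, cpumask_of_node(
                                    find_next_best_node(node, &used_nodes)));
    }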
@@ -7264,9 +7259,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
-	/* FIXME: Use cpu_coregroup_mask. */
-	*mask = cpu_coregroup_map(cpu);
-	cpus_and(*mask, *mask, *cpu_map);
+	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
 	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
@@ -7296,10 +7289,8 @@ static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
 				 struct cpumask *nodemask)
 {
 	int group;
-	/* FIXME: use cpumask_of_node */
-	node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
 
-	cpumask_and(nodemask, pnodemask, cpu_map);
+	cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
 	group = cpumask_first(nodemask);
 
 	if (sg)
@@ -7350,10 +7341,8 @@ static void free_sched_groups(const struct cpumask *cpu_map,
 
 	for (i = 0; i < nr_node_ids; i++) {
 		struct sched_group *oldsg, *sg = sched_group_nodes[i];
-		/* FIXME: Use cpumask_of_node */
-		node_to_cpumask_ptr(pnodemask, i);
 
-		cpus_and(*nodemask, *pnodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 		if (cpumask_empty(nodemask))
 			continue;
 
@@ -7562,9 +7551,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 
-		/* FIXME: use cpumask_of_node */
-		*nodemask = node_to_cpumask(cpu_to_node(i));
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
@@ -7605,9 +7592,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = &per_cpu(core_domains, i).sd;
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		*sched_domain_span(sd) = cpu_coregroup_map(i);
-		cpumask_and(sched_domain_span(sd),
-			    sched_domain_span(sd), cpu_map);
+		cpumask_and(sched_domain_span(sd), cpu_map,
+			    cpu_coregroup_mask(i));
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7643,9 +7629,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
 	for_each_cpu(i, cpu_map) {
-		/* FIXME: Use cpu_coregroup_mask */
-		*this_core_map = cpu_coregroup_map(i);
-		cpus_and(*this_core_map, *this_core_map, *cpu_map);
+		cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
 		if (i != cpumask_first(this_core_map))
 			continue;
 
@@ -7657,9 +7641,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
-		/* FIXME: Use cpumask_of_node */
-		*nodemask = node_to_cpumask(i);
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 		if (cpumask_empty(nodemask))
 			continue;
 
@@ -7681,11 +7663,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_group *sg, *prev;
 		int j;
 
-		/* FIXME: Use cpumask_of_node */
-		*nodemask = node_to_cpumask(i);
 		cpumask_clear(covered);
-
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 		if (cpumask_empty(nodemask)) {
 			sched_group_nodes[i] = NULL;
 			continue;
@@ -7716,8 +7695,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 		for (j = 0; j < nr_node_ids; j++) {
 			int n = (i + j) % nr_node_ids;
-			/* FIXME: Use cpumask_of_node */
-			node_to_cpumask_ptr(pnodemask, n);
 
 			cpumask_complement(notcovered, covered);
 			cpumask_and(tmpmask, notcovered, cpu_map);
@@ -7725,7 +7702,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 			if (cpumask_empty(tmpmask))
 				break;
 
-			cpumask_and(tmpmask, tmpmask, pnodemask);
+			cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
 			if (cpumask_empty(tmpmask))
 				continue;
 