Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 53 +++++++++++++++--------------------------------------------
 1 file changed, 15 insertions(+), 38 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 930bf2e6d714..545c6fccd1dc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3715,7 +3715,7 @@ redo:
		 * don't kick the migration_thread, if the curr
		 * task on busiest cpu can't be moved to this_cpu
		 */
-		if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+		if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
 			double_unlock_balance(this_rq, busiest);
 			all_pinned = 1;
 			return ld_moved;
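
Note: the hunk above is the pattern used throughout this patch. cpu_isset() is a macro that names the cpumask_t itself, while cpumask_test_cpu() takes a const struct cpumask pointer, matching the rest of the new cpumask API. A minimal sketch of the two call styles; the wrapper function is hypothetical, only the cpumask calls and the cpus_allowed field come from the patch.

	#include <linux/sched.h>
	#include <linux/cpumask.h>

	/* Hypothetical helper, for illustration only. */
	static bool task_allowed_on(struct task_struct *p, int cpu)
	{
		/* old style: cpu_isset(cpu, p->cpus_allowed), operating on
		 * the cpumask_t object directly.
		 */
		/* new style: pass a pointer to the mask instead. */
		return cpumask_test_cpu(cpu, &p->cpus_allowed);
	}
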
@@ -6257,9 +6257,7 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
 	int dest_cpu;
-	/* FIXME: Use cpumask_of_node here. */
-	cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));
-	const struct cpumask *nodemask = &_nodemask;
 
+	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
 	/* Look for allowed, online CPU in same node. */
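
Note: move_task_off_dead_cpu() previously built a cpumask_t on the stack via node_to_cpumask() and then took its address; with large NR_CPUS that on-stack copy is exactly what the cpumask rework avoids. cpumask_of_node() instead returns a pointer to the node's precomputed mask. A small sketch of the new idiom; the helper below is illustrative, not part of the patch.

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Illustrative only: pick any online CPU on the same node as @cpu. */
	static int any_online_cpu_on_node(int cpu)
	{
		const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));

		return cpumask_any_and(nodemask, cpu_online_mask);
	}
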
@@ -7170,21 +7168,18 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 static void sched_domain_node_span(int node, struct cpumask *span)
 {
 	nodemask_t used_nodes;
-	/* FIXME: use cpumask_of_node() */
-	node_to_cpumask_ptr(nodemask, node);
 	int i;
 
-	cpus_clear(*span);
+	cpumask_clear(span);
 	nodes_clear(used_nodes);
 
-	cpus_or(*span, *span, *nodemask);
+	cpumask_or(span, span, cpumask_of_node(node));
 	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
 		int next_node = find_next_best_node(node, &used_nodes);
 
-		node_to_cpumask_ptr_next(nodemask, next_node);
-		cpus_or(*span, *span, *nodemask);
+		cpumask_or(span, span, cpumask_of_node(next_node));
 	}
 }
 #endif /* CONFIG_NUMA */
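
Note: sched_domain_node_span() shows the operator half of the conversion: cpus_clear()/cpus_or() dereferenced cpumask_t objects, while cpumask_clear()/cpumask_or() take struct cpumask pointers, so the span can be built in place without a temporary nodemask. A simplified stand-in for the loop body's new shape, not the patched code verbatim:

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Simplified stand-in: OR one node's CPUs into @span. */
	static void accumulate_node_cpus(struct cpumask *span, int node)
	{
		/* old: node_to_cpumask_ptr(nodemask, node);
		 *      cpus_or(*span, *span, *nodemask);
		 */
		cpumask_or(span, span, cpumask_of_node(node));
	}
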
@@ -7264,9 +7259,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
-	/* FIXME: Use cpu_coregroup_mask. */
-	*mask = cpu_coregroup_map(cpu);
-	cpus_and(*mask, *mask, *cpu_map);
+	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
 	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
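
Note: cpu_to_phys_group() carried the same FIXME for the core-sibling mask: cpu_coregroup_map() returned a cpumask_t by value, forcing a copy plus a separate cpus_and(); cpu_coregroup_mask() returns a const struct cpumask pointer, so the restriction to cpu_map collapses into one cpumask_and(). Sketch of the resulting idiom, with a hypothetical wrapper name:

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Hypothetical wrapper: core siblings of @cpu that are also in @cpu_map. */
	static void core_siblings_in_map(int cpu, const struct cpumask *cpu_map,
					 struct cpumask *mask)
	{
		cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
	}
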
@@ -7296,10 +7289,8 @@ static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
 				 struct cpumask *nodemask)
 {
 	int group;
-	/* FIXME: use cpumask_of_node */
-	node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
 
-	cpumask_and(nodemask, pnodemask, cpu_map);
+	cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
 	group = cpumask_first(nodemask);
 
 	if (sg)
@@ -7350,10 +7341,8 @@ static void free_sched_groups(const struct cpumask *cpu_map,
 
 	for (i = 0; i < nr_node_ids; i++) {
 		struct sched_group *oldsg, *sg = sched_group_nodes[i];
-		/* FIXME: Use cpumask_of_node */
-		node_to_cpumask_ptr(pnodemask, i);
 
-		cpus_and(*nodemask, *pnodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 		if (cpumask_empty(nodemask))
 			continue;
 
@@ -7562,9 +7551,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 
-		/* FIXME: use cpumask_of_node */
-		*nodemask = node_to_cpumask(cpu_to_node(i));
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
@@ -7605,9 +7592,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = &per_cpu(core_domains, i).sd;
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		*sched_domain_span(sd) = cpu_coregroup_map(i);
-		cpumask_and(sched_domain_span(sd),
-			    sched_domain_span(sd), cpu_map);
+		cpumask_and(sched_domain_span(sd), cpu_map,
+			    cpu_coregroup_mask(i));
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
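
Note: in the CONFIG_SCHED_MC domain setup the patch goes a step further: rather than assigning a whole cpumask_t into *sched_domain_span(sd) and masking it afterwards, the cpumask_and() result is written straight into the domain's span, since sched_domain_span() already yields a struct cpumask pointer. A minimal sketch of that shape (illustrative helper, not the patched function):

	#include <linux/cpumask.h>
	#include <linux/sched.h>
	#include <linux/topology.h>

	/* Illustrative: build an MC-level span directly in the domain. */
	static void set_mc_span(struct sched_domain *sd, int cpu,
				const struct cpumask *cpu_map)
	{
		cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(cpu));
	}
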
@@ -7643,9 +7629,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
 	for_each_cpu(i, cpu_map) {
-		/* FIXME: Use cpu_coregroup_mask */
-		*this_core_map = cpu_coregroup_map(i);
-		cpus_and(*this_core_map, *this_core_map, *cpu_map);
+		cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
 		if (i != cpumask_first(this_core_map))
 			continue;
 
@@ -7657,9 +7641,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
-		/* FIXME: Use cpumask_of_node */
-		*nodemask = node_to_cpumask(i);
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 		if (cpumask_empty(nodemask))
 			continue;
 
@@ -7681,11 +7663,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_group *sg, *prev;
 		int j;
 
-		/* FIXME: Use cpumask_of_node */
-		*nodemask = node_to_cpumask(i);
 		cpumask_clear(covered);
-
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 		if (cpumask_empty(nodemask)) {
 			sched_group_nodes[i] = NULL;
 			continue;
@@ -7716,8 +7695,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 		for (j = 0; j < nr_node_ids; j++) {
 			int n = (i + j) % nr_node_ids;
-			/* FIXME: Use cpumask_of_node */
-			node_to_cpumask_ptr(pnodemask, n);
 
 			cpumask_complement(notcovered, covered);
 			cpumask_and(tmpmask, notcovered, cpu_map);
@@ -7725,7 +7702,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 			if (cpumask_empty(tmpmask))
 				break;
 
-			cpumask_and(tmpmask, tmpmask, pnodemask);
+			cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
 			if (cpumask_empty(tmpmask))
 				continue;
 