Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5c829d4ba8f1..2ecec06e3f0c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8546,6 +8546,23 @@ static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
 	return sd;
 }
 
+static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+	struct sched_domain *parent, int i)
+{
+	struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_SMT
+	sd = &per_cpu(cpu_domains, i).sd;
+	SD_INIT(sd, SIBLING);
+	set_domain_attribute(sd, attr);
+	cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
+	sd->parent = parent;
+	parent->child = sd;
+	cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+	return sd;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -8569,7 +8586,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
 	for_each_cpu(i, cpu_map) {
-		struct sched_domain *sd = NULL, *p;
+		struct sched_domain *sd;
 
 		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
 			    cpu_map);
@@ -8577,18 +8594,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
 		sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
-
-#ifdef CONFIG_SCHED_SMT
-		p = sd;
-		sd = &per_cpu(cpu_domains, i).sd;
-		SD_INIT(sd, SIBLING);
-		set_domain_attribute(sd, attr);
-		cpumask_and(sched_domain_span(sd),
-			    topology_thread_cpumask(i), cpu_map);
-		sd->parent = p;
-		p->child = sd;
-		cpu_to_cpu_group(i, cpu_map, &sd->groups, d.tmpmask);
-#endif
+		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
 	}
 
 #ifdef CONFIG_SCHED_SMT