Diffstat (limited to 'kernel/sched/topology.c')
-rw-r--r--  kernel/sched/topology.c | 31
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index ab7f371a3a17..f53f89df837d 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -615,13 +615,13 @@ static void destroy_sched_domains(struct sched_domain *sd)
  * the cpumask of the domain), this allows us to quickly tell if
  * two CPUs are in the same cache domain, see cpus_share_cache().
  */
-DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
-DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-DEFINE_PER_CPU(struct sched_domain *, sd_numa);
-DEFINE_PER_CPU(struct sched_domain *, sd_asym_packing);
-DEFINE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
+DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
 
 static void update_top_cache_domain(int cpu)
@@ -1059,6 +1059,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
 	struct sched_domain *child = sd->child;
 	struct sched_group *sg;
+	bool already_visited;
 
 	if (child)
 		cpu = cpumask_first(sched_domain_span(child));
@@ -1066,9 +1067,14 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 	sg = *per_cpu_ptr(sdd->sg, cpu);
 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
 
-	/* For claim_allocations: */
-	atomic_inc(&sg->ref);
-	atomic_inc(&sg->sgc->ref);
+	/* Increase refcounts for claim_allocations: */
+	already_visited = atomic_inc_return(&sg->ref) > 1;
+	/* sgc visits should follow a similar trend as sg */
+	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
+
+	/* If we have already visited that group, it's already initialized. */
+	if (already_visited)
+		return sg;
 
 	if (child) {
 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
@@ -1087,8 +1093,8 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 
 /*
  * build_sched_groups will build a circular linked list of the groups
- * covered by the given span, and will set each group's ->cpumask correctly,
- * and ->cpu_capacity to 0.
+ * covered by the given span, will set each group's ->cpumask correctly,
+ * and will initialize their ->sgc.
  *
  * Assumes the sched_domain tree is fully constructed
  */
@@ -2075,9 +2081,8 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
 }
 
 /*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
- * For now this just excludes isolated CPUs, but could be used to
- * exclude other special cases in the future.
+ * Set up scheduler domains and groups. For now this just excludes isolated
+ * CPUs, but could be used to exclude other special cases in the future.
  */
 int sched_init_domains(const struct cpumask *cpu_map)
 {
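
The core of the get_group() change is the "already visited" test: the first caller to take a reference on a zero-initialized group sees the count reach 1 and performs the one-time setup, while every later caller sees a value greater than 1 and returns early. Below is a minimal, self-contained C11 userspace sketch of that refcount pattern; it is not the kernel code itself, and struct group, get_or_init() and init_count are illustrative assumptions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a shared, zero-initialized sched_group-like object. */
struct group {
	atomic_int ref;
	int init_count;		/* how many times the one-time setup actually ran */
};

/*
 * Same pattern as get_group(): bump the refcount first, and treat a new
 * count greater than 1 as "someone has been here before, skip the setup".
 * atomic_fetch_add() returns the old value, so old + 1 mirrors the kernel's
 * atomic_inc_return().
 */
static struct group *get_or_init(struct group *g)
{
	bool already_visited = (atomic_fetch_add(&g->ref, 1) + 1) > 1;

	if (already_visited)
		return g;	/* reference taken, initialization already done */

	g->init_count++;	/* one-time setup runs exactly once */
	return g;
}

int main(void)
{
	struct group g;

	atomic_init(&g.ref, 0);
	g.init_count = 0;

	get_or_init(&g);
	get_or_init(&g);	/* second visit only takes another reference */

	/* Expect: ref=2 init_count=1 */
	printf("ref=%d init_count=%d\n", atomic_load(&g.ref), g.init_count);
	return 0;
}

The kernel patch applies the same idea, and additionally WARNs if the sg and sgc counters ever disagree, since both are expected to be visited together.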