author		Peter Zijlstra <peterz@infradead.org>	2016-05-09 04:37:59 -0400
committer	Ingo Molnar <mingo@kernel.org>		2016-09-30 04:54:06 -0400
commit		24fc7edb92eea05946119cc0258c891c26b3b469 (patch)
tree		f746d2b660304243515e81f978145fca61783f63
parent		16f3ef46805a5ffc75549deac2ff6af08bdf590b (diff)
sched/core: Introduce 'struct sched_domain_shared'
Since struct sched_domain is strictly per CPU, introduce a structure
that is shared between all 'identical' sched_domains.

Limit it to SD_SHARE_PKG_RESOURCES domains for now, as we'll only use
it for shared cache state; if another use comes up later we can easily
relax this.

While sched_groups are normally shared between CPUs, they are not a
natural fit when we need shared state at the domain level, since that
would require the domain to have a parent, which is not a given.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	include/linux/sched.h	 6
-rw-r--r--	kernel/sched/core.c	44
2 files changed, 45 insertions(+), 5 deletions(-)
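To make the lifetime rule described above concrete, one reference-counted object shared by every per-cpu domain that covers the same span, here is a minimal userspace C model. It is a sketch only: toy_shared, toy_domain, attach_shared and destroy_domain are invented names, not kernel API, and C11 atomics stand in for the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_shared {
	atomic_int ref;			/* mirrors sched_domain_shared::ref */
};

struct toy_domain {
	int cpu;
	struct toy_shared *shared;	/* mirrors sched_domain::shared */
};

/* One candidate instance per CPU; only the instance belonging to the
 * first CPU of a span is actually attached and refcounted. */
static struct toy_shared *shared_per_cpu[4];

static void attach_shared(struct toy_domain *d, int span_first_cpu)
{
	d->shared = shared_per_cpu[span_first_cpu];
	atomic_fetch_add(&d->shared->ref, 1);
}

static void destroy_domain(struct toy_domain *d)
{
	/* Free the shared object only on the last reference, as
	 * destroy_sched_domain() does via atomic_dec_and_test(). */
	if (d->shared && atomic_fetch_sub(&d->shared->ref, 1) == 1)
		free(d->shared);
	free(d);
}

int main(void)
{
	struct toy_domain *d0 = calloc(1, sizeof(*d0));
	struct toy_domain *d1 = calloc(1, sizeof(*d1));

	shared_per_cpu[0] = calloc(1, sizeof(*shared_per_cpu[0]));

	/* CPUs 0 and 1 share one cache span whose first CPU is 0, so
	 * both domains attach to the same instance. */
	attach_shared(d0, 0);
	attach_shared(d1, 0);
	printf("ref = %d\n", atomic_load(&d0->shared->ref));	/* prints 2 */

	destroy_domain(d0);
	destroy_domain(d1);	/* final reference: frees the shared object */
	return 0;
}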
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b99fcd1b341e..8a878b9649a1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1067,6 +1067,10 @@ extern int sched_domain_level_max;
 
 struct sched_group;
 
+struct sched_domain_shared {
+	atomic_t	ref;
+};
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -1135,6 +1139,7 @@ struct sched_domain {
 		void *private;		/* used during construction */
 		struct rcu_head rcu;	/* used during destruction */
 	};
+	struct sched_domain_shared *shared;
 
 	unsigned int span_weight;
 	/*
@@ -1168,6 +1173,7 @@ typedef int (*sched_domain_flags_f)(void);
 
 struct sd_data {
 	struct sched_domain **__percpu sd;
+	struct sched_domain_shared **__percpu sds;
 	struct sched_group **__percpu sg;
 	struct sched_group_capacity **__percpu sgc;
 };
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 662d08d7b1df..97d3c18d00bf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5947,6 +5947,8 @@ static void destroy_sched_domain(struct sched_domain *sd)
 		kfree(sd->groups->sgc);
 		kfree(sd->groups);
 	}
+	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
+		kfree(sd->shared);
 	kfree(sd);
 }
 
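For readers unfamiliar with the kernel helper: atomic_dec_and_test() decrements the counter and returns true exactly when it reaches zero, which is what makes the unconditional kfree() above safe. A userspace equivalent, sketched with C11 atomics (dec_and_test is an invented name):

#include <stdatomic.h>
#include <stdbool.h>

/* fetch_sub returns the value before the decrement, so old == 1 means
 * this caller just dropped the final reference and may free the object. */
static bool dec_and_test(atomic_int *v)
{
	return atomic_fetch_sub(v, 1) == 1;
}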
@@ -6385,6 +6387,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
+	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
+		*per_cpu_ptr(sdd->sds, cpu) = NULL;
+
 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
 
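claim_allocations() implements an "over-allocate, then claim" scheme: __sdt_alloc() pre-allocates one candidate object per CPU, construction takes references on the ones it actually uses, and claiming NULLs those slots so the later bulk-free pass in __sdt_free() only releases the leftovers. A compact userspace model of that split (slot, claim_used and free_unclaimed are illustrative names, not kernel API):

#include <stdatomic.h>
#include <stdlib.h>

struct slot { atomic_int ref; };

static struct slot *slots[4];	/* stands in for the per-cpu sds array */

/* Claiming: a slot whose object gained a reference now belongs to the
 * domain tree, so hide it from the bulk-free pass. */
static void claim_used(int i)
{
	if (slots[i] && atomic_load(&slots[i]->ref))
		slots[i] = NULL;
}

/* Teardown: free only what nothing claimed; free(NULL) is a no-op. */
static void free_unclaimed(void)
{
	for (int i = 0; i < 4; i++)
		free(slots[i]);
}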
@@ -6429,10 +6434,12 @@ static int sched_domains_curr_level;
 
 static struct sched_domain *
 sd_init(struct sched_domain_topology_level *tl,
+	const struct cpumask *cpu_map,
 	struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
-	int sd_weight, sd_flags = 0;
+	struct sd_data *sdd = &tl->data;
+	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
+	int sd_id, sd_weight, sd_flags = 0;
 
 #ifdef CONFIG_NUMA
 	/*
@@ -6487,6 +6494,9 @@ sd_init(struct sched_domain_topology_level *tl,
 #endif
 	};
 
+	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+	sd_id = cpumask_first(sched_domain_span(sd));
+
 	/*
 	 * Convert topological properties into behaviour.
 	 */
@@ -6529,7 +6539,16 @@ sd_init(struct sched_domain_topology_level *tl,
 		sd->idle_idx = 1;
 	}
 
-	sd->private = &tl->data;
+	/*
+	 * For all levels sharing cache; connect a sched_domain_shared
+	 * instance.
+	 */
+	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
+		atomic_inc(&sd->shared->ref);
+	}
+
+	sd->private = sdd;
 
 	return sd;
 }
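The key trick is sd_id = cpumask_first(sched_domain_span(sd)): every CPU inside one SD_SHARE_PKG_RESOURCES span computes an identical span, hence an identical first CPU, hence attaches to the same per-cpu sds instance, which is what makes the state shared. This is also why the cpumask_and() span computation moves from build_sched_domain() into sd_init() (last hunk below): the span must be known before the shared instance can be chosen. A toy demonstration, where a uint64_t bitmask stands in for a real cpumask and first_cpu is an invented helper:

#include <assert.h>
#include <stdint.h>

/* cpumask_first() reduced to "index of the lowest set bit". */
static int first_cpu(uint64_t span)
{
	return __builtin_ctzll(span);
}

int main(void)
{
	uint64_t llc_span = 0xf;	/* CPUs 0-3 share one cache */

	/* All four CPUs derive the same canonical id from the same
	 * span, so all four domains pick the sds of CPU 0. */
	for (int cpu = 0; cpu < 4; cpu++)
		assert(first_cpu(llc_span) == 0);

	return 0;
}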
@@ -6839,6 +6858,10 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sd)
 			return -ENOMEM;
 
+		sdd->sds = alloc_percpu(struct sched_domain_shared *);
+		if (!sdd->sds)
+			return -ENOMEM;
+
 		sdd->sg = alloc_percpu(struct sched_group *);
 		if (!sdd->sg)
 			return -ENOMEM;
@@ -6849,6 +6872,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
+			struct sched_domain_shared *sds;
 			struct sched_group *sg;
 			struct sched_group_capacity *sgc;
 
@@ -6859,6 +6883,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
 			*per_cpu_ptr(sdd->sd, j) = sd;
 
+			sds = kzalloc_node(sizeof(struct sched_domain_shared),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sds)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sds, j) = sds;
+
 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
 			if (!sg)
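Note the allocation pattern: __sdt_alloc() gives every CPU in the map its own zeroed sched_domain_shared on that CPU's local node, even though only one instance per cache span will ultimately be claimed; sd_init() picks the winner and __sdt_free() releases the rest. A userspace sketch of the over-provisioning step, where calloc stands in for kzalloc_node and the names are invented for illustration:

#include <stdatomic.h>
#include <stdlib.h>

struct toy_shared { atomic_int ref; };

/* Pre-allocate one zeroed candidate per CPU; on failure the caller is
 * expected to unwind everything, as __sdt_free() does. */
static int pre_alloc_per_cpu(struct toy_shared **tbl, int ncpus)
{
	for (int j = 0; j < ncpus; j++) {
		tbl[j] = calloc(1, sizeof(**tbl));
		if (!tbl[j])
			return -1;
	}
	return 0;
}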
@@ -6898,6 +6929,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
 				kfree(*per_cpu_ptr(sdd->sd, j));
 			}
 
+			if (sdd->sds)
+				kfree(*per_cpu_ptr(sdd->sds, j));
 			if (sdd->sg)
 				kfree(*per_cpu_ptr(sdd->sg, j));
 			if (sdd->sgc)
@@ -6905,6 +6938,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		}
 		free_percpu(sdd->sd);
 		sdd->sd = NULL;
+		free_percpu(sdd->sds);
+		sdd->sds = NULL;
 		free_percpu(sdd->sg);
 		sdd->sg = NULL;
 		free_percpu(sdd->sgc);
@@ -6916,9 +6951,8 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = sd_init(tl, child, cpu);
+	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
 
-	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
 	if (child) {
 		sd->level = child->level + 1;
 		sched_domain_level_max = max(sched_domain_level_max, sd->level);