author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-07-14 07:00:06 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-07-20 12:32:40 -0400
commit		9c3f75cbd144014bea6af866a154cc2e73ab2287 (patch)
tree		2a565f7ff0820269973415c5605e313cfff903b4 /kernel/sched.c
parent		e6625fa48e6580a74b7e700efd7e6463e282810b (diff)
sched: Break out cpu_power from the sched_group structure
In order to prepare for non-unique sched_groups per domain, we need to
carry the cpu_power elsewhere, so put a level of indirection in.

Reported-and-tested-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-qkho2byuhe4482fuknss40ad@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c (-rw-r--r--) | 32
 1 file changed, 26 insertions(+), 6 deletions(-)
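The "level of indirection" named in the commit message is the key point: the per-group power value moves behind a pointer, so that once sched_groups are no longer unique per domain, several groups can share one power structure. Below is a minimal user-space sketch of that shape; the field types are simplified stand-ins, and the real struct sched_group_power and struct sched_group are defined elsewhere in this commit (this diff is limited to kernel/sched.c) and carry more members.

/*
 * Sketch only: simplified, user-space stand-ins for the kernel structures.
 * The point is the indirection added by this patch -- several sched_group
 * instances can point at one shared sched_group_power, so code now reads
 * group->sgp->power where it used to read group->cpu_power.
 */
#include <stdio.h>
#include <stdlib.h>

struct sched_group_power {
	int ref;			/* shared refcount (atomic_t in the kernel) */
	unsigned long power;		/* was sched_group::cpu_power before this patch */
};

struct sched_group {
	struct sched_group *next;	/* circular list of groups in a domain */
	struct sched_group_power *sgp;	/* the new level of indirection */
};

int main(void)
{
	struct sched_group_power *sgp = calloc(1, sizeof(*sgp));
	struct sched_group *a = calloc(1, sizeof(*a));
	struct sched_group *b = calloc(1, sizeof(*b));

	if (!sgp || !a || !b)
		return 1;

	/* two groups share the same power structure */
	a->sgp = sgp;
	b->sgp = sgp;
	sgp->power = 1024;	/* SCHED_POWER_SCALE in this kernel */

	/* the shared value is visible through either group */
	printf("a: %lu, b: %lu\n", a->sgp->power, b->sgp->power);

	free(a);
	free(b);
	free(sgp);
	return 0;
}

This mirrors what the diff below does: sd_data gains a per-cpu sched_group_power pointer array, get_group() wires each group's sgp pointer, and allocation/freeing of the power structure is handled alongside the group itself.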
diff --git a/kernel/sched.c b/kernel/sched.c
index 3dc716f6d8a..36c10d25d4c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6557,7 +6557,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!group->cpu_power) {
+		if (!group->sgp->power) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
@@ -6581,9 +6581,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
 		printk(KERN_CONT " %s", str);
-		if (group->cpu_power != SCHED_POWER_SCALE) {
+		if (group->sgp->power != SCHED_POWER_SCALE) {
 			printk(KERN_CONT " (cpu_power = %d)",
-				group->cpu_power);
+				group->sgp->power);
 		}
 
 		group = group->next;
@@ -6777,8 +6777,10 @@ static struct root_domain *alloc_rootdomain(void)
 static void free_sched_domain(struct rcu_head *rcu)
 {
 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-	if (atomic_dec_and_test(&sd->groups->ref))
+	if (atomic_dec_and_test(&sd->groups->ref)) {
+		kfree(sd->groups->sgp);
 		kfree(sd->groups);
+	}
 	kfree(sd);
 }
 
@@ -6945,6 +6947,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
 	struct sched_domain **__percpu sd;
 	struct sched_group **__percpu sg;
+	struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6981,8 +6984,10 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 	if (child)
 		cpu = cpumask_first(sched_domain_span(child));
 
-	if (sg)
+	if (sg) {
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
+		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+	}
 
 	return cpu;
 }
@@ -7020,7 +7025,7 @@ build_sched_groups(struct sched_domain *sd)
 			continue;
 
 		cpumask_clear(sched_group_cpus(sg));
-		sg->cpu_power = 0;
+		sg->sgp->power = 0;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
@@ -7185,6 +7190,7 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	if (cpu == cpumask_first(sched_group_cpus(sg))) {
 		WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
+		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
 	}
 }
 
@@ -7234,9 +7240,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sg)
 			return -ENOMEM;
 
+		sdd->sgp = alloc_percpu(struct sched_group_power *);
+		if (!sdd->sgp)
+			return -ENOMEM;
+
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
+			struct sched_group_power *sgp;
 
 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -7251,6 +7262,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sg, j) = sg;
+
+			sgp = kzalloc_node(sizeof(struct sched_group_power),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sgp)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sgp, j) = sgp;
 		}
 	}
 
@@ -7268,9 +7286,11 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		for_each_cpu(j, cpu_map) {
 			kfree(*per_cpu_ptr(sdd->sd, j));
 			kfree(*per_cpu_ptr(sdd->sg, j));
+			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
 		free_percpu(sdd->sg);
+		free_percpu(sdd->sgp);
 	}
 }
 