author		Linus Torvalds <torvalds@linux-foundation.org>	2011-07-20 18:55:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-20 18:55:48 -0400
commit		acc11eab70591744369722280c9ce162a6193494 (patch)
tree		f23138ad28b557311b6e512f40ec27fbc42481f1
parent		919d25a710bd6ded210426e911c9f9ec535d8d9c (diff)
parent		d110235d2c331c4f79e0879f51104be79e17a469 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Avoid creating superfluous NUMA domains on non-NUMA systems
  sched: Allow for overlapping sched_domain spans
  sched: Break out cpu_power from the sched_group structure
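For orientation before the diff: the core of "Break out cpu_power from the sched_group structure" is that the per-group power fields move into a separately allocated, reference-counted struct sched_group_power, reached from struct sched_group via ->sgp, so callers read group->sgp->power where they previously read group->cpu_power. The sketch below is distilled from the sched.h hunk that follows; it is not a complete definition (the covered-CPUs cpumask and other members are elided).

	struct sched_group_power {		/* new: refcounted, shareable between overlapping groups */
		atomic_t ref;
		unsigned int power, power_orig;	/* formerly sched_group::cpu_power, cpu_power_orig */
	};

	struct sched_group {
		struct sched_group *next;	/* must be a circular list */
		atomic_t ref;
		unsigned int group_weight;
		struct sched_group_power *sgp;	/* callers now use group->sgp->power */
		/* ... cpumask of the CPUs this group covers follows ... */
	};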
-rw-r--r--	include/linux/sched.h	|  14
-rw-r--r--	kernel/sched.c	| 189
-rw-r--r--	kernel/sched_fair.c	|  46
-rw-r--r--	kernel/sched_features.h	|   2
4 files changed, 190 insertions, 61 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 496770a96487..bde99d5358dc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -844,6 +844,7 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
@@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void)
 	return 0;
 }
 
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
+struct sched_group_power {
 	atomic_t ref;
-
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power, cpu_power_orig;
+	unsigned int power, power_orig;
+};
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
+
 	unsigned int group_weight;
+	struct sched_group_power *sgp;
 
 	/*
 	 * The CPUs this group covers.
diff --git a/kernel/sched.c b/kernel/sched.c
index 3dc716f6d8ad..14168c49a154 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6557,7 +6557,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!group->cpu_power) {
+		if (!group->sgp->power) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
@@ -6581,9 +6581,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
 		printk(KERN_CONT " %s", str);
-		if (group->cpu_power != SCHED_POWER_SCALE) {
+		if (group->sgp->power != SCHED_POWER_SCALE) {
 			printk(KERN_CONT " (cpu_power = %d)",
-				group->cpu_power);
+				group->sgp->power);
 		}
 
 		group = group->next;
@@ -6774,11 +6774,39 @@ static struct root_domain *alloc_rootdomain(void)
 	return rd;
 }
 
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+	struct sched_group *tmp, *first;
+
+	if (!sg)
+		return;
+
+	first = sg;
+	do {
+		tmp = sg->next;
+
+		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+			kfree(sg->sgp);
+
+		kfree(sg);
+		sg = tmp;
+	} while (sg != first);
+}
+
 static void free_sched_domain(struct rcu_head *rcu)
 {
 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-	if (atomic_dec_and_test(&sd->groups->ref))
+
+	/*
+	 * If its an overlapping domain it has private groups, iterate and
+	 * nuke them all.
+	 */
+	if (sd->flags & SD_OVERLAP) {
+		free_sched_groups(sd->groups, 1);
+	} else if (atomic_dec_and_test(&sd->groups->ref)) {
+		kfree(sd->groups->sgp);
 		kfree(sd->groups);
+	}
 	kfree(sd);
 }
 
@@ -6945,6 +6973,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
 	struct sched_domain **__percpu sd;
 	struct sched_group **__percpu sg;
+	struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6964,15 +6993,73 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
+#define SDTL_OVERLAP	0x01
+
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
+	int		    flags;
 	struct sd_data      data;
 };
 
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+	const struct cpumask *span = sched_domain_span(sd);
+	struct cpumask *covered = sched_domains_tmpmask;
+	struct sd_data *sdd = sd->private;
+	struct sched_domain *child;
+	int i;
+
+	cpumask_clear(covered);
+
+	for_each_cpu(i, span) {
+		struct cpumask *sg_span;
+
+		if (cpumask_test_cpu(i, covered))
+			continue;
+
+		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+				GFP_KERNEL, cpu_to_node(i));
+
+		if (!sg)
+			goto fail;
+
+		sg_span = sched_group_cpus(sg);
+
+		child = *per_cpu_ptr(sdd->sd, i);
+		if (child->child) {
+			child = child->child;
+			cpumask_copy(sg_span, sched_domain_span(child));
+		} else
+			cpumask_set_cpu(i, sg_span);
+
+		cpumask_or(covered, covered, sg_span);
+
+		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+		atomic_inc(&sg->sgp->ref);
+
+		if (cpumask_test_cpu(cpu, sg_span))
+			groups = sg;
+
+		if (!first)
+			first = sg;
+		if (last)
+			last->next = sg;
+		last = sg;
+		last->next = first;
+	}
+	sd->groups = groups;
+
+	return 0;
+
+fail:
+	free_sched_groups(first, 0);
+
+	return -ENOMEM;
+}
+
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -6981,24 +7068,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 	if (child)
 		cpu = cpumask_first(sched_domain_span(child));
 
-	if (sg)
+	if (sg) {
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
+		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+	}
 
 	return cpu;
 }
 
 /*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
  */
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
 {
 	struct sched_group *first = NULL, *last = NULL;
 	struct sd_data *sdd = sd->private;
@@ -7006,6 +7093,12 @@ build_sched_groups(struct sched_domain *sd)
 	struct cpumask *covered;
 	int i;
 
+	get_group(cpu, sdd, &sd->groups);
+	atomic_inc(&sd->groups->ref);
+
+	if (cpu != cpumask_first(sched_domain_span(sd)))
+		return 0;
+
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
 
@@ -7020,7 +7113,7 @@ build_sched_groups(struct sched_domain *sd)
 			continue;
 
 		cpumask_clear(sched_group_cpus(sg));
-		sg->cpu_power = 0;
+		sg->sgp->power = 0;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
@@ -7037,6 +7130,8 @@ build_sched_groups(struct sched_domain *sd)
 		last = sg;
 	}
 	last->next = first;
+
+	return 0;
 }
 
 /*
@@ -7051,12 +7146,17 @@ build_sched_groups(struct sched_domain *sd)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-	WARN_ON(!sd || !sd->groups);
+	struct sched_group *sg = sd->groups;
 
-	if (cpu != group_first_cpu(sd->groups))
-		return;
+	WARN_ON(!sd || !sg);
 
-	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+	do {
+		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+		sg = sg->next;
+	} while (sg != sd->groups);
+
+	if (cpu != group_first_cpu(sg))
+		return;
 
 	update_group_power(sd, cpu);
 }
@@ -7177,15 +7277,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
 	struct sd_data *sdd = sd->private;
-	struct sched_group *sg = sd->groups;
 
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-	if (cpu == cpumask_first(sched_group_cpus(sg))) {
-		WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
-	}
+
+	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -7210,7 +7310,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
 	{ sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-	{ sd_init_NODE, cpu_node_mask, },
+	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
 	{ sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
 	{ NULL, },
@@ -7234,9 +7334,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sg)
 			return -ENOMEM;
 
+		sdd->sgp = alloc_percpu(struct sched_group_power *);
+		if (!sdd->sgp)
+			return -ENOMEM;
+
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
+			struct sched_group_power *sgp;
 
 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -7251,6 +7356,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sg, j) = sg;
+
+			sgp = kzalloc_node(sizeof(struct sched_group_power),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sgp)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sgp, j) = sgp;
 		}
 	}
 
@@ -7266,11 +7378,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			kfree(*per_cpu_ptr(sdd->sd, j));
+			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+			if (sd && (sd->flags & SD_OVERLAP))
+				free_sched_groups(sd->groups, 0);
 			kfree(*per_cpu_ptr(sdd->sg, j));
+			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
 		free_percpu(sdd->sg);
+		free_percpu(sdd->sgp);
 	}
 }
 
@@ -7316,8 +7432,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->init; tl++)
+		for (tl = sched_domain_topology; tl->init; tl++) {
 			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+				sd->flags |= SD_OVERLAP;
+			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+				break;
+		}
 
 		while (sd->child)
 			sd = sd->child;
@@ -7329,13 +7450,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
-			get_group(i, sd->private, &sd->groups);
-			atomic_inc(&sd->groups->ref);
-
-			if (i != cpumask_first(sched_domain_span(sd)))
-				continue;
-
-			build_sched_groups(sd);
+			if (sd->flags & SD_OVERLAP) {
+				if (build_overlap_sched_groups(sd, i))
+					goto error;
+			} else {
+				if (build_sched_groups(sd, i))
+					goto error;
+			}
 		}
 	}
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 433491c2dc8f..c768588e180b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1585,7 +1585,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	}
 
 	/* Adjust by relative CPU power of the group */
-	avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+	avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
 
 	if (local_group) {
 		this_load = avg_load;
@@ -2631,7 +2631,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 		power >>= SCHED_POWER_SHIFT;
 	}
 
-	sdg->cpu_power_orig = power;
+	sdg->sgp->power_orig = power;
 
 	if (sched_feat(ARCH_POWER))
 		power *= arch_scale_freq_power(sd, cpu);
@@ -2647,7 +2647,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 		power = 1;
 
 	cpu_rq(cpu)->cpu_power = power;
-	sdg->cpu_power = power;
+	sdg->sgp->power = power;
 }
 
 static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2665,11 +2665,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 	group = child->groups;
 	do {
-		power += group->cpu_power;
+		power += group->sgp->power;
 		group = group->next;
 	} while (group != child->groups);
 
-	sdg->cpu_power = power;
+	sdg->sgp->power = power;
 }
 
 /*
@@ -2691,7 +2691,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 	/*
 	 * If ~90% of the cpu_power is still there, we're good.
 	 */
-	if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
 		return 1;
 
 	return 0;
@@ -2771,7 +2771,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	}
 
 	/* Adjust by relative CPU power of the group */
-	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
 
 	/*
 	 * Consider the group unbalanced when the imbalance is larger
@@ -2788,7 +2788,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
 		sgs->group_imb = 1;
 
-	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
 						SCHED_POWER_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
@@ -2877,7 +2877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 			return;
 
 		sds->total_load += sgs.group_load;
-		sds->total_pwr += sg->cpu_power;
+		sds->total_pwr += sg->sgp->power;
 
 		/*
 		 * In case the child domain prefers tasks go to siblings
@@ -2962,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd,
 	if (this_cpu > busiest_cpu)
 		return 0;
 
-	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
 				       SCHED_POWER_SCALE);
 	return 1;
 }
@@ -2993,7 +2993,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 
 	scaled_busy_load_per_task = sds->busiest_load_per_task
 					 * SCHED_POWER_SCALE;
-	scaled_busy_load_per_task /= sds->busiest->cpu_power;
+	scaled_busy_load_per_task /= sds->busiest->sgp->power;
 
 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
 			(scaled_busy_load_per_task * imbn)) {
@@ -3007,28 +3007,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 	 * moving them.
 	 */
 
-	pwr_now += sds->busiest->cpu_power *
+	pwr_now += sds->busiest->sgp->power *
 			min(sds->busiest_load_per_task, sds->max_load);
-	pwr_now += sds->this->cpu_power *
+	pwr_now += sds->this->sgp->power *
 			min(sds->this_load_per_task, sds->this_load);
 	pwr_now /= SCHED_POWER_SCALE;
 
 	/* Amount of load we'd subtract */
 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->cpu_power;
+		sds->busiest->sgp->power;
 	if (sds->max_load > tmp)
-		pwr_move += sds->busiest->cpu_power *
+		pwr_move += sds->busiest->sgp->power *
 			min(sds->busiest_load_per_task, sds->max_load - tmp);
 
 	/* Amount of load we'd add */
-	if (sds->max_load * sds->busiest->cpu_power <
+	if (sds->max_load * sds->busiest->sgp->power <
 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
-		tmp = (sds->max_load * sds->busiest->cpu_power) /
-			sds->this->cpu_power;
+		tmp = (sds->max_load * sds->busiest->sgp->power) /
+			sds->this->sgp->power;
 	else
 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-			sds->this->cpu_power;
-	pwr_move += sds->this->cpu_power *
+			sds->this->sgp->power;
+	pwr_move += sds->this->sgp->power *
 		min(sds->this_load_per_task, sds->this_load + tmp);
 	pwr_move /= SCHED_POWER_SCALE;
 
@@ -3074,7 +3074,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
 
-		load_above_capacity /= sds->busiest->cpu_power;
+		load_above_capacity /= sds->busiest->sgp->power;
 	}
 
 	/*
@@ -3090,8 +3090,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
 	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min(max_pull * sds->busiest->cpu_power,
-		(sds->avg_load - sds->this_load) * sds->this->cpu_power)
+	*imbalance = min(max_pull * sds->busiest->sgp->power,
+		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
 			/ SCHED_POWER_SCALE;
 
 	/*
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index be40f7371ee1..1e7066d76c26 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -70,3 +70,5 @@ SCHED_FEAT(NONIRQ_POWER, 1)
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, 1)
+
+SCHED_FEAT(FORCE_SD_OVERLAP, 0)