-rw-r--r--   Documentation/x86/boot.txt |   2
-rw-r--r--   arch/x86/Kconfig           |   2
-rw-r--r--   arch/x86/kernel/reboot.c   |   8
-rw-r--r--   include/linux/sched.h      |  14
-rw-r--r--   kernel/sched.c             | 189
-rw-r--r--   kernel/sched_fair.c        |  46
-rw-r--r--   kernel/sched_features.h    |   2
7 files changed, 200 insertions(+), 63 deletions(-)
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 9b7221a86df2..7c3a8801b7ce 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -674,7 +674,7 @@ Protocol: 2.10+
 
 Field name:	init_size
 Type:		read
-Offset/size:	0x25c/4
+Offset/size:	0x260/4
 
 This field indicates the amount of linear contiguous memory starting
 at the kernel runtime start address that the kernel needs before it
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index da349723d411..37357a599dca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1170,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
-	depends on NUMA && PCI
+	depends on X86_64 && NUMA && PCI
 	---help---
 	  Enable AMD NUMA node topology detection. You should say Y here if
 	  you have a multi processor AMD system. This uses an old method to
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4f0d46fefa7f..14eed214b584 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -419,6 +419,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Latitude E6320. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E6320",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+		},
+	},
 	{ }
 };
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 76676a407e4a..14a6c7b545de 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -844,6 +844,7 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
@@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void)
 	return 0;
 }
 
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
+struct sched_group_power {
 	atomic_t ref;
-
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power, cpu_power_orig;
+	unsigned int power, power_orig;
+};
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
+
 	unsigned int group_weight;
+	struct sched_group_power *sgp;
 
 	/*
 	 * The CPUs this group covers.
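
For context (an editorial sketch, not part of the patch): the hunk above splits the per-group power fields out of struct sched_group into a separately allocated, reference-counted struct sched_group_power, so several groups, possibly built on different CPUs, can point at one shared power structure. A minimal standalone model of that sharing pattern in plain C; put_power() and the plain-int refcount are illustration-only stand-ins for the kernel's atomic_t helpers:

/*
 * Editorial sketch, not from the patch: two groups sharing one
 * refcounted sched_group_power.  The kernel uses atomic_t and
 * atomic_dec_and_test(); a plain int stands in for them here.
 */
#include <stdio.h>
#include <stdlib.h>

struct sched_group_power {
	int ref;			/* atomic_t in the kernel */
	unsigned int power, power_orig;
};

struct sched_group {
	struct sched_group *next;	/* must be a circular list */
	struct sched_group_power *sgp;	/* shared, reference-counted */
};

/* hypothetical helper: drop one reference, free on the last put */
static void put_power(struct sched_group *sg)
{
	if (--sg->sgp->ref == 0)
		free(sg->sgp);
}

int main(void)
{
	struct sched_group_power *sgp = calloc(1, sizeof(*sgp));
	struct sched_group a = { .next = &a, .sgp = sgp };
	struct sched_group b = { .next = &b, .sgp = sgp };

	if (!sgp)
		return 1;

	sgp->power = 1024;
	sgp->ref = 1;		/* first user, as get_group() sets it */
	b.sgp->ref++;		/* second group reuses the same power */

	put_power(&a);		/* 2 -> 1: nothing freed yet */
	put_power(&b);		/* 1 -> 0: shared structure freed */

	printf("shared sched_group_power freed on the last put\n");
	return 0;
}
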
diff --git a/kernel/sched.c b/kernel/sched.c
index 31e92aee6242..fde6ff903525 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6589,7 +6589,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!group->cpu_power) {
+		if (!group->sgp->power) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
@@ -6613,9 +6613,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
 		printk(KERN_CONT " %s", str);
-		if (group->cpu_power != SCHED_POWER_SCALE) {
+		if (group->sgp->power != SCHED_POWER_SCALE) {
 			printk(KERN_CONT " (cpu_power = %d)",
-				group->cpu_power);
+				group->sgp->power);
 		}
 
 		group = group->next;
@@ -6806,11 +6806,39 @@ static struct root_domain *alloc_rootdomain(void)
 	return rd;
 }
 
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+	struct sched_group *tmp, *first;
+
+	if (!sg)
+		return;
+
+	first = sg;
+	do {
+		tmp = sg->next;
+
+		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+			kfree(sg->sgp);
+
+		kfree(sg);
+		sg = tmp;
+	} while (sg != first);
+}
+
 static void free_sched_domain(struct rcu_head *rcu)
 {
 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-	if (atomic_dec_and_test(&sd->groups->ref))
+
+	/*
+	 * If its an overlapping domain it has private groups, iterate and
+	 * nuke them all.
+	 */
+	if (sd->flags & SD_OVERLAP) {
+		free_sched_groups(sd->groups, 1);
+	} else if (atomic_dec_and_test(&sd->groups->ref)) {
+		kfree(sd->groups->sgp);
 		kfree(sd->groups);
+	}
 	kfree(sd);
 }
 
@@ -6977,6 +7005,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
 	struct sched_domain **__percpu sd;
 	struct sched_group **__percpu sg;
+	struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6996,15 +7025,73 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
+#define SDTL_OVERLAP	0x01
+
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
+	int		    flags;
 	struct sd_data      data;
 };
 
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+	const struct cpumask *span = sched_domain_span(sd);
+	struct cpumask *covered = sched_domains_tmpmask;
+	struct sd_data *sdd = sd->private;
+	struct sched_domain *child;
+	int i;
+
+	cpumask_clear(covered);
+
+	for_each_cpu(i, span) {
+		struct cpumask *sg_span;
+
+		if (cpumask_test_cpu(i, covered))
+			continue;
+
+		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+				GFP_KERNEL, cpu_to_node(i));
+
+		if (!sg)
+			goto fail;
+
+		sg_span = sched_group_cpus(sg);
+
+		child = *per_cpu_ptr(sdd->sd, i);
+		if (child->child) {
+			child = child->child;
+			cpumask_copy(sg_span, sched_domain_span(child));
+		} else
+			cpumask_set_cpu(i, sg_span);
+
+		cpumask_or(covered, covered, sg_span);
+
+		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+		atomic_inc(&sg->sgp->ref);
+
+		if (cpumask_test_cpu(cpu, sg_span))
+			groups = sg;
+
+		if (!first)
+			first = sg;
+		if (last)
+			last->next = sg;
+		last = sg;
+		last->next = first;
+	}
+	sd->groups = groups;
+
+	return 0;
+
+fail:
+	free_sched_groups(first, 0);
+
+	return -ENOMEM;
+}
+
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -7013,24 +7100,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 	if (child)
 		cpu = cpumask_first(sched_domain_span(child));
 
-	if (sg)
+	if (sg) {
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
+		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+	}
 
 	return cpu;
 }
 
 /*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
  */
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
 {
 	struct sched_group *first = NULL, *last = NULL;
 	struct sd_data *sdd = sd->private;
@@ -7038,6 +7125,12 @@ build_sched_groups(struct sched_domain *sd)
 	struct cpumask *covered;
 	int i;
 
+	get_group(cpu, sdd, &sd->groups);
+	atomic_inc(&sd->groups->ref);
+
+	if (cpu != cpumask_first(sched_domain_span(sd)))
+		return 0;
+
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
 
@@ -7052,7 +7145,7 @@ build_sched_groups(struct sched_domain *sd)
 			continue;
 
 		cpumask_clear(sched_group_cpus(sg));
-		sg->cpu_power = 0;
+		sg->sgp->power = 0;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
@@ -7069,6 +7162,8 @@ build_sched_groups(struct sched_domain *sd)
 		last = sg;
 	}
 	last->next = first;
+
+	return 0;
 }
 
 /*
@@ -7083,12 +7178,17 @@ build_sched_groups(struct sched_domain *sd)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-	WARN_ON(!sd || !sd->groups);
+	struct sched_group *sg = sd->groups;
 
-	if (cpu != group_first_cpu(sd->groups))
-		return;
+	WARN_ON(!sd || !sg);
 
-	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+	do {
+		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+		sg = sg->next;
+	} while (sg != sd->groups);
+
+	if (cpu != group_first_cpu(sg))
+		return;
 
 	update_group_power(sd, cpu);
 }
@@ -7209,15 +7309,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
 	struct sd_data *sdd = sd->private;
-	struct sched_group *sg = sd->groups;
 
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-	if (cpu == cpumask_first(sched_group_cpus(sg))) {
-		WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
-	}
+
+	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -7242,7 +7342,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
 	{ sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-	{ sd_init_NODE, cpu_node_mask, },
+	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
 	{ sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
 	{ NULL, },
@@ -7266,9 +7366,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sg)
 			return -ENOMEM;
 
+		sdd->sgp = alloc_percpu(struct sched_group_power *);
+		if (!sdd->sgp)
+			return -ENOMEM;
+
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
+			struct sched_group_power *sgp;
 
 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -7283,6 +7388,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sg, j) = sg;
+
+			sgp = kzalloc_node(sizeof(struct sched_group_power),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sgp)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sgp, j) = sgp;
 		}
 	}
 
@@ -7298,11 +7410,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			kfree(*per_cpu_ptr(sdd->sd, j));
+			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+			if (sd && (sd->flags & SD_OVERLAP))
+				free_sched_groups(sd->groups, 0);
 			kfree(*per_cpu_ptr(sdd->sg, j));
+			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
 		free_percpu(sdd->sg);
+		free_percpu(sdd->sgp);
 	}
 }
 
@@ -7348,8 +7464,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->init; tl++)
+		for (tl = sched_domain_topology; tl->init; tl++) {
 			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+				sd->flags |= SD_OVERLAP;
+			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+				break;
+		}
 
 		while (sd->child)
 			sd = sd->child;
@@ -7361,13 +7482,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
-			get_group(i, sd->private, &sd->groups);
-			atomic_inc(&sd->groups->ref);
-
-			if (i != cpumask_first(sched_domain_span(sd)))
-				continue;
-
-			build_sched_groups(sd);
+			if (sd->flags & SD_OVERLAP) {
+				if (build_overlap_sched_groups(sd, i))
+					goto error;
+			} else {
+				if (build_sched_groups(sd, i))
+					goto error;
+			}
 		}
 	}
 
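
For context (an editorial sketch, not part of the patch): build_overlap_sched_groups() above walks the domain's span, skips CPUs that some group already covers, and gives each new group the span of that CPU's next-lower domain (or just the CPU itself when there is none), taking the matching per-cpu sched_group_power by reference. A reduced standalone model of just that covering loop, with an unsigned long standing in for a cpumask; NCPUS, child_span and sg_span are illustration-only names and values:

/*
 * Editorial sketch, not from the patch: the covering loop of
 * build_overlap_sched_groups() with bitmasks instead of cpumasks.
 */
#include <stdio.h>

#define NCPUS 8

int main(void)
{
	/* made-up lower-level spans, one per CPU (bit i = CPU i) */
	unsigned long child_span[NCPUS] = {
		0x0f, 0x0f, 0x0f, 0x0f,	/* CPUs 0-3 share one span */
		0xf0, 0xf0, 0xf0, 0xf0,	/* CPUs 4-7 share another  */
	};
	unsigned long span = 0xff;	/* span of the domain being built */
	unsigned long covered = 0;
	int i;

	for (i = 0; i < NCPUS; i++) {
		unsigned long sg_span;

		if (!(span & (1UL << i)))
			continue;	/* CPU not in this domain */
		if (covered & (1UL << i))
			continue;	/* already in some group */

		/* new group: take the lower level's span, as the patch does */
		sg_span = child_span[i];
		covered |= sg_span;

		printf("group led by CPU %d spans 0x%02lx\n", i, sg_span);
	}
	return 0;
}
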
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 433491c2dc8f..c768588e180b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1585,7 +1585,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		}
 
 		/* Adjust by relative CPU power of the group */
-		avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
 
 		if (local_group) {
 			this_load = avg_load;
@@ -2631,7 +2631,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 		power >>= SCHED_POWER_SHIFT;
 	}
 
-	sdg->cpu_power_orig = power;
+	sdg->sgp->power_orig = power;
 
 	if (sched_feat(ARCH_POWER))
 		power *= arch_scale_freq_power(sd, cpu);
@@ -2647,7 +2647,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 		power = 1;
 
 	cpu_rq(cpu)->cpu_power = power;
-	sdg->cpu_power = power;
+	sdg->sgp->power = power;
 }
 
 static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2665,11 +2665,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 	group = child->groups;
 	do {
-		power += group->cpu_power;
+		power += group->sgp->power;
 		group = group->next;
 	} while (group != child->groups);
 
-	sdg->cpu_power = power;
+	sdg->sgp->power = power;
 }
 
 /*
@@ -2691,7 +2691,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 	/*
 	 * If ~90% of the cpu_power is still there, we're good.
 	 */
-	if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
 		return 1;
 
 	return 0;
@@ -2771,7 +2771,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	}
 
 	/* Adjust by relative CPU power of the group */
-	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
 
 	/*
 	 * Consider the group unbalanced when the imbalance is larger
@@ -2788,7 +2788,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
 		sgs->group_imb = 1;
 
-	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
 						SCHED_POWER_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
@@ -2877,7 +2877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 			return;
 
 		sds->total_load += sgs.group_load;
-		sds->total_pwr += sg->cpu_power;
+		sds->total_pwr += sg->sgp->power;
 
 		/*
 		 * In case the child domain prefers tasks go to siblings
@@ -2962,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd,
 	if (this_cpu > busiest_cpu)
 		return 0;
 
-	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
 				       SCHED_POWER_SCALE);
 	return 1;
 }
@@ -2993,7 +2993,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 
 	scaled_busy_load_per_task = sds->busiest_load_per_task
 					 * SCHED_POWER_SCALE;
-	scaled_busy_load_per_task /= sds->busiest->cpu_power;
+	scaled_busy_load_per_task /= sds->busiest->sgp->power;
 
 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
 			(scaled_busy_load_per_task * imbn)) {
@@ -3007,28 +3007,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 	 * moving them.
 	 */
 
-	pwr_now += sds->busiest->cpu_power *
+	pwr_now += sds->busiest->sgp->power *
 			min(sds->busiest_load_per_task, sds->max_load);
-	pwr_now += sds->this->cpu_power *
+	pwr_now += sds->this->sgp->power *
 			min(sds->this_load_per_task, sds->this_load);
 	pwr_now /= SCHED_POWER_SCALE;
 
 	/* Amount of load we'd subtract */
 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->cpu_power;
+		sds->busiest->sgp->power;
 	if (sds->max_load > tmp)
-		pwr_move += sds->busiest->cpu_power *
+		pwr_move += sds->busiest->sgp->power *
 			min(sds->busiest_load_per_task, sds->max_load - tmp);
 
 	/* Amount of load we'd add */
-	if (sds->max_load * sds->busiest->cpu_power <
+	if (sds->max_load * sds->busiest->sgp->power <
 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
-		tmp = (sds->max_load * sds->busiest->cpu_power) /
-			sds->this->cpu_power;
+		tmp = (sds->max_load * sds->busiest->sgp->power) /
+			sds->this->sgp->power;
 	else
 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-			sds->this->cpu_power;
-	pwr_move += sds->this->cpu_power *
+			sds->this->sgp->power;
+	pwr_move += sds->this->sgp->power *
 			min(sds->this_load_per_task, sds->this_load + tmp);
 	pwr_move /= SCHED_POWER_SCALE;
 
@@ -3074,7 +3074,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
 
-		load_above_capacity /= sds->busiest->cpu_power;
+		load_above_capacity /= sds->busiest->sgp->power;
 	}
 
 	/*
@@ -3090,8 +3090,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
 	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min(max_pull * sds->busiest->cpu_power,
-		(sds->avg_load - sds->this_load) * sds->this->cpu_power)
+	*imbalance = min(max_pull * sds->busiest->sgp->power,
+		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
 			/ SCHED_POWER_SCALE;
 
 	/*
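
For context (an editorial sketch, not part of the patch): the sched_fair.c hunks above mechanically redirect the old cpu_power accesses to the shared sgp->power; the one consumer with real structure is update_group_power(), which walks the circular ->next list of the child domain's groups and sums each shared sgp->power into the parent group. A standalone model of that walk; sum_group_power() and the power values are illustration-only:

/*
 * Editorial sketch, not from the patch: summing sgp->power around
 * the circular group list, as update_group_power() does.
 */
#include <stdio.h>

struct sched_group_power { unsigned int power; };

struct sched_group {
	struct sched_group *next;	/* circular list */
	struct sched_group_power *sgp;
};

static unsigned int sum_group_power(struct sched_group *groups)
{
	struct sched_group *sg = groups;
	unsigned int power = 0;

	do {				/* the list is circular, so do/while */
		power += sg->sgp->power;
		sg = sg->next;
	} while (sg != groups);

	return power;
}

int main(void)
{
	struct sched_group_power p0 = { 1024 }, p1 = { 1024 };
	struct sched_group g1 = { .sgp = &p1 };
	struct sched_group g0 = { .next = &g1, .sgp = &p0 };

	g1.next = &g0;			/* close the ring */
	printf("parent group power: %u\n", sum_group_power(&g0));
	return 0;
}
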
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index be40f7371ee1..1e7066d76c26 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -70,3 +70,5 @@ SCHED_FEAT(NONIRQ_POWER, 1)
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, 1)
+
+SCHED_FEAT(FORCE_SD_OVERLAP, 0)