author	Vincent Guittot <vincent.guittot@linaro.org>	2014-04-11 05:44:39 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-05-07 07:33:51 -0400
commit	607b45e9a216e89a63351556e488eea06be0ff48 (patch)
tree	2d275c4d7842541baff33c0486450db4acb29dd6 /kernel/sched
parent	2dfd747629e65f89d2dbceb92fffc763f66228b2 (diff)
sched, powerpc: Create a dedicated topology table
Create a dedicated topology table for handling the asymmetric SMT feature of powerpc.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Fleming <afleming@freescale.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Preeti U. Murthy <preeti@linux.vnet.ibm.com>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: schwidefsky@de.ibm.com
Cc: cmetcalf@tilera.com
Cc: dietmar.eggemann@arm.com
Cc: devicetree@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1397209481-28542-4-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
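The dedicated table itself lives on the powerpc side, which the diffstat below (limited to kernel/sched) does not show. A minimal sketch of what such an arch topology table looks like, assuming the set_sched_topology() interface introduced earlier in this series; the names powerpc_smt_flags() and powerpc_init_sched_topology() are illustrative, not verbatim from the commit:

#include <linux/sched.h>	/* sched_domain_topology_level, SD_* flags */
#include <asm/cputable.h>	/* cpu_has_feature(), CPU_FTR_ASYM_SMT */

/*
 * Per-level flags callback: the architecture decides, for its SMT
 * level, whether the hardware is asymmetric, instead of the scheduler
 * core asking a weak arch_sd_sibling_asym_packing() hook.
 */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT))
		flags |= SD_ASYM_PACKING;

	return flags;
}

/*
 * The dedicated topology table: an SMT level carrying powerpc's own
 * flags callback, then a package level; terminated by a NULL mask.
 */
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/* Registered once during arch SMP bringup, e.g. from smp_cpus_done(). */
static void __init powerpc_init_sched_topology(void)
{
	set_sched_topology(powerpc_topology);
}

Because the SMT level's flags now come from the arch's own callback, the scheduler core no longer needs the weak hook, which is exactly what the diff below deletes.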
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	6
1 file changed, 0 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e59e5aec745a..7e348e238bf1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5796,11 +5796,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
 }
 
-int __weak arch_sd_sibling_asym_packing(void)
-{
-	return 0*SD_ASYM_PACKING;
-}
-
 /*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -5981,7 +5976,6 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
 	if (sd->flags & SD_SHARE_CPUPOWER) {
 		sd->imbalance_pct = 110;
 		sd->smt_gain = 1178; /* ~15% */
-		sd->flags |= arch_sd_sibling_asym_packing();
 
 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
 		sd->imbalance_pct = 117;
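On the consuming side, sd_init() now derives a level's flags from the topology table's sd_flags callback rather than from the deleted weak hook. A simplified sketch of that lookup, assuming the sched_domain_topology_level layout from this series; topology_level_flags() is a hypothetical helper (the real sd_init() inlines this logic) and TOPOLOGY_SD_FLAGS is the core's whitelist of flags a topology callback may set:

/*
 * Sketch of how sd_init() obtains arch flags after this patch:
 * per-level callbacks replace the global weak hook, so powerpc's
 * SD_ASYM_PACKING arrives via its table entry for the SMT level.
 */
static int topology_level_flags(struct sched_domain_topology_level *tl)
{
	int sd_flags = tl->sd_flags ? tl->sd_flags() : 0;

	/* Reject flags a topology callback is not allowed to set. */
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
		      "wrong sd_flags in topology description\n"))
		sd_flags &= ~TOPOLOGY_SD_FLAGS;

	return sd_flags;
}

With the flags supplied per level, the SD_SHARE_CPUPOWER branch above no longer needs to query the architecture at all.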