diff options
author | Vincent Guittot <vincent.guittot@linaro.org> | 2014-04-11 05:44:39 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2014-05-07 07:33:51 -0400 |
commit | 607b45e9a216e89a63351556e488eea06be0ff48 (patch) | |
tree | 2d275c4d7842541baff33c0486450db4acb29dd6 | |
parent | 2dfd747629e65f89d2dbceb92fffc763f66228b2 (diff) |
sched, powerpc: Create a dedicated topology table
Create a dedicated topology table for handling the asymmetric SMT feature of powerpc.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Fleming <afleming@freescale.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Preeti U. Murthy <preeti@linux.vnet.ibm.com>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: schwidefsky@de.ibm.com
Cc: cmetcalf@tilera.com
Cc: dietmar.eggemann@arm.com
Cc: devicetree@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1397209481-28542-4-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | arch/powerpc/kernel/smp.c | 31 | ||||
-rw-r--r-- | include/linux/sched.h | 2 | ||||
-rw-r--r-- | kernel/sched/core.c | 6 |
3 files changed, 23 insertions, 16 deletions
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index e2a4232c5871..10ffffef0414 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -766,6 +766,28 @@ int setup_profiling_timer(unsigned int multiplier) | |||
766 | return 0; | 766 | return 0; |
767 | } | 767 | } |
768 | 768 | ||
769 | #ifdef CONFIG_SCHED_SMT | ||
770 | /* cpumask of CPUs with asymmetric SMT dependency */ | ||
771 | static const int powerpc_smt_flags(void) | ||
772 | { | ||
773 | int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES; | ||
774 | |||
775 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
776 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
777 | flags |= SD_ASYM_PACKING; | ||
778 | } | ||
779 | return flags; | ||
780 | } | ||
781 | #endif | ||
782 | |||
783 | static struct sched_domain_topology_level powerpc_topology[] = { | ||
784 | #ifdef CONFIG_SCHED_SMT | ||
785 | { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, | ||
786 | #endif | ||
787 | { cpu_cpu_mask, SD_INIT_NAME(DIE) }, | ||
788 | { NULL, }, | ||
789 | }; | ||
790 | |||
769 | void __init smp_cpus_done(unsigned int max_cpus) | 791 | void __init smp_cpus_done(unsigned int max_cpus) |
770 | { | 792 | { |
771 | cpumask_var_t old_mask; | 793 | cpumask_var_t old_mask; |
@@ -790,15 +812,8 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
790 | 812 | ||
791 | dump_numa_cpu_topology(); | 813 | dump_numa_cpu_topology(); |
792 | 814 | ||
793 | } | 815 | set_sched_topology(powerpc_topology); |
794 | 816 | ||
795 | int arch_sd_sibling_asym_packing(void) | ||
796 | { | ||
797 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
798 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
799 | return SD_ASYM_PACKING; | ||
800 | } | ||
801 | return 0; | ||
802 | } | 817 | } |
803 | 818 | ||
804 | #ifdef CONFIG_HOTPLUG_CPU | 819 | #ifdef CONFIG_HOTPLUG_CPU |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 656b035c30e5..439a153b8403 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -877,8 +877,6 @@ enum cpu_idle_type { | |||
877 | #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ | 877 | #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ |
878 | #define SD_NUMA 0x4000 /* cross-node balancing */ | 878 | #define SD_NUMA 0x4000 /* cross-node balancing */ |
879 | 879 | ||
880 | extern int __weak arch_sd_sibiling_asym_packing(void); | ||
881 | |||
882 | #ifdef CONFIG_SCHED_SMT | 880 | #ifdef CONFIG_SCHED_SMT |
883 | static inline const int cpu_smt_flags(void) | 881 | static inline const int cpu_smt_flags(void) |
884 | { | 882 | { |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e59e5aec745a..7e348e238bf1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -5796,11 +5796,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
5796 | atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight); | 5796 | atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight); |
5797 | } | 5797 | } |
5798 | 5798 | ||
5799 | int __weak arch_sd_sibling_asym_packing(void) | ||
5800 | { | ||
5801 | return 0*SD_ASYM_PACKING; | ||
5802 | } | ||
5803 | |||
5804 | /* | 5799 | /* |
5805 | * Initializers for schedule domains | 5800 | * Initializers for schedule domains |
5806 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() | 5801 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() |
@@ -5981,7 +5976,6 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) | |||
5981 | if (sd->flags & SD_SHARE_CPUPOWER) { | 5976 | if (sd->flags & SD_SHARE_CPUPOWER) { |
5982 | sd->imbalance_pct = 110; | 5977 | sd->imbalance_pct = 110; |
5983 | sd->smt_gain = 1178; /* ~15% */ | 5978 | sd->smt_gain = 1178; /* ~15% */ |
5984 | sd->flags |= arch_sd_sibling_asym_packing(); | ||
5985 | 5979 | ||
5986 | } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { | 5980 | } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { |
5987 | sd->imbalance_pct = 117; | 5981 | sd->imbalance_pct = 117; |