author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-09-01 04:34:35 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-09-04 04:09:54 -0400
commit     a52bfd73589eaf88d9c95ad2c1de0b38a6b27972
tree       33cee609a46624525abf1643d71d0fbc79e25f87
parent     cc9fba7d7672fa3ed58d9d9ecb6c45b1351c29a6
sched: Add smt_gain
The idea is that multi-threading a core yields more work capacity than a
single thread, so provide a way to express a static gain for threads.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083826.073345955@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
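The gain is expressed in fixed point relative to SCHED_LOAD_SCALE (1 << SCHED_LOAD_SHIFT = 1024), so a percentage gain maps to a value slightly above 1024. A minimal user-space sketch of that conversion, assuming the 2.6.31-era constants; the helper name pct_to_smt_gain is illustrative and not part of the patch:

#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1U << SCHED_LOAD_SHIFT)	/* 1024 */

/* Illustrative helper: express a percentage gain in SCHED_LOAD_SCALE units. */
static unsigned int pct_to_smt_gain(unsigned int pct)
{
	return (SCHED_LOAD_SCALE * (100 + pct) + 50) / 100;	/* rounded */
}

int main(void)
{
	/* 15% rounds to 1178, the default wired into SD_SIBLING_INIT below. */
	printf("smt_gain for 15%% = %u\n", pct_to_smt_gain(15));
	return 0;
}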
-rw-r--r--  include/linux/sched.h    | 1
-rw-r--r--  include/linux/topology.h | 1
-rw-r--r--  kernel/sched.c           | 8
3 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 651dded25720..9c81c921acb3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -921,6 +921,7 @@ struct sched_domain {
 	unsigned int newidle_idx;
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
+	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	enum sched_domain_level level;
 
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7402c1a27c4f..6203ae5067ce 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,6 +99,7 @@ int arch_update_cpu_topology(void);
 				| SD_SHARE_CPUPOWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
+	.smt_gain		= 1178,	/* 15% */	\
 }
 #endif
 #endif /* CONFIG_SCHED_SMT */
diff --git a/kernel/sched.c b/kernel/sched.c
index ecb4a47d4214..55112261027b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8523,9 +8523,15 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 		weight = cpumask_weight(sched_domain_span(sd));
 		/*
 		 * SMT siblings share the power of a single core.
+		 * Usually multiple threads get a better yield out of
+		 * that one core than a single thread would have,
+		 * reflect that in sd->smt_gain.
 		 */
-		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1)
+		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+			power *= sd->smt_gain;
 			power /= weight;
+			power >>= SCHED_LOAD_SHIFT;
+		}
 		sg_inc_cpu_power(sd->groups, power);
 		return;
 	}
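A stand-alone sketch of the arithmetic added to init_sched_groups_power(), with the kernel constants hard-coded as assumptions: a two-thread core using the default smt_gain of 1178 ends up advertising 589 units of cpu_power per sibling, so the pair totals roughly 1178, about 15% more than the 1024 of a single-threaded core.

#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)	/* 1024 */

int main(void)
{
	unsigned long power = SCHED_LOAD_SCALE;	/* base power of one core */
	unsigned int smt_gain = 1178;		/* default: ~15% */
	unsigned int weight = 2;		/* two SMT siblings */

	/* Mirror of the SD_SHARE_CPUPOWER branch added by this patch. */
	power *= smt_gain;			/* 1024 * 1178 = 1206272 */
	power /= weight;			/* 603136 */
	power >>= SCHED_LOAD_SHIFT;		/* back to load-scale units: 589 */

	printf("per-thread power = %lu, core total = %lu\n",
	       power, power * weight);		/* 589, 1178 */
	return 0;
}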