diff options
author | Paul Turner <pjt@google.com> | 2012-10-04 07:18:32 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2012-10-24 04:27:31 -0400 |
commit | f4e26b120b9de84cb627bc7361ba43cfdc51341f (patch) | |
tree | 19786e34c5de3c9b7c9c871a27b0f8d85cc8690d /kernel/sched | |
parent | 5b51f2f80b3b906ce59bd4dce6eca3c7f34cb1b9 (diff) |
sched: Introduce temporary FAIR_GROUP_SCHED dependency for load-tracking
While per-entity load-tracking is generally useful beyond computing shares
distribution — e.g. for runnable-based load-balance (in progress), governors,
power-management, etc. —
these facilities are not yet consumers of this data. This dependency may be
trivially reverted when the information is required; until then, avoid paying
the overhead for calculations we will not use.
Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141507.422162369@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/core.c | 7 | ||||
-rw-r--r-- | kernel/sched/fair.c | 13 | ||||
-rw-r--r-- | kernel/sched/sched.h | 9 |
3 files changed, 25 insertions, 4 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f26860074ef2..5dae0d252ff7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1526,7 +1526,12 @@ static void __sched_fork(struct task_struct *p) | |||
1526 | p->se.vruntime = 0; | 1526 | p->se.vruntime = 0; |
1527 | INIT_LIST_HEAD(&p->se.group_node); | 1527 | INIT_LIST_HEAD(&p->se.group_node); |
1528 | 1528 | ||
1529 | #ifdef CONFIG_SMP | 1529 | /* |
1530 | * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be | ||
1531 | * removed when useful for applications beyond shares distribution (e.g. | ||
1532 | * load-balance). | ||
1533 | */ | ||
1534 | #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED) | ||
1530 | p->se.avg.runnable_avg_period = 0; | 1535 | p->se.avg.runnable_avg_period = 0; |
1531 | p->se.avg.runnable_avg_sum = 0; | 1536 | p->se.avg.runnable_avg_sum = 0; |
1532 | #endif | 1537 | #endif |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6ecf455fd95b..3e6a3531fa90 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -882,7 +882,8 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq) | |||
882 | } | 882 | } |
883 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 883 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
884 | 884 | ||
885 | #ifdef CONFIG_SMP | 885 | /* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */ |
886 | #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED) | ||
886 | /* | 887 | /* |
887 | * We choose a half-life close to 1 scheduling period. | 888 | * We choose a half-life close to 1 scheduling period. |
888 | * Note: The tables below are dependent on this value. | 889 | * Note: The tables below are dependent on this value. |
@@ -3174,6 +3175,12 @@ unlock: | |||
3174 | } | 3175 | } |
3175 | 3176 | ||
3176 | /* | 3177 | /* |
3178 | * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be | ||
3179 | * removed when useful for applications beyond shares distribution (e.g. | ||
3180 | * load-balance). | ||
3181 | */ | ||
3182 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
3183 | /* | ||
3177 | * Called immediately before a task is migrated to a new cpu; task_cpu(p) and | 3184 | * Called immediately before a task is migrated to a new cpu; task_cpu(p) and |
3178 | * cfs_rq_of(p) references at time of call are still valid and identify the | 3185 | * cfs_rq_of(p) references at time of call are still valid and identify the |
3179 | * previous cpu. However, the caller only guarantees p->pi_lock is held; no | 3186 | * previous cpu. However, the caller only guarantees p->pi_lock is held; no |
@@ -3196,6 +3203,7 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu) | |||
3196 | atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load); | 3203 | atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load); |
3197 | } | 3204 | } |
3198 | } | 3205 | } |
3206 | #endif | ||
3199 | #endif /* CONFIG_SMP */ | 3207 | #endif /* CONFIG_SMP */ |
3200 | 3208 | ||
3201 | static unsigned long | 3209 | static unsigned long |
@@ -5773,8 +5781,9 @@ const struct sched_class fair_sched_class = { | |||
5773 | 5781 | ||
5774 | #ifdef CONFIG_SMP | 5782 | #ifdef CONFIG_SMP |
5775 | .select_task_rq = select_task_rq_fair, | 5783 | .select_task_rq = select_task_rq_fair, |
5784 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
5776 | .migrate_task_rq = migrate_task_rq_fair, | 5785 | .migrate_task_rq = migrate_task_rq_fair, |
5777 | 5786 | #endif | |
5778 | .rq_online = rq_online_fair, | 5787 | .rq_online = rq_online_fair, |
5779 | .rq_offline = rq_offline_fair, | 5788 | .rq_offline = rq_offline_fair, |
5780 | 5789 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0a75a430ca77..5eca173b563f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -225,6 +225,12 @@ struct cfs_rq { | |||
225 | #endif | 225 | #endif |
226 | 226 | ||
227 | #ifdef CONFIG_SMP | 227 | #ifdef CONFIG_SMP |
228 | /* | ||
229 | * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be | ||
230 | * removed when useful for applications beyond shares distribution (e.g. | ||
231 | * load-balance). | ||
232 | */ | ||
233 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
228 | /* | 234 | /* |
229 | * CFS Load tracking | 235 | * CFS Load tracking |
230 | * Under CFS, load is tracked on a per-entity basis and aggregated up. | 236 | * Under CFS, load is tracked on a per-entity basis and aggregated up. |
@@ -234,7 +240,8 @@ struct cfs_rq { | |||
234 | u64 runnable_load_avg, blocked_load_avg; | 240 | u64 runnable_load_avg, blocked_load_avg; |
235 | atomic64_t decay_counter, removed_load; | 241 | atomic64_t decay_counter, removed_load; |
236 | u64 last_decay; | 242 | u64 last_decay; |
237 | 243 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | |
244 | /* These always depend on CONFIG_FAIR_GROUP_SCHED */ | ||
238 | #ifdef CONFIG_FAIR_GROUP_SCHED | 245 | #ifdef CONFIG_FAIR_GROUP_SCHED |
239 | u32 tg_runnable_contrib; | 246 | u32 tg_runnable_contrib; |
240 | u64 tg_load_contrib; | 247 | u64 tg_load_contrib; |