commit    fed14d45f945042a15b09de48d7d3d58d9455fc4
tree      9f94e472cb3395023a7b29dfee17829fc75c8f0d
parent    3c4017c13f91069194fce3160944efec50f15a6e
author    Peter Zijlstra <peterz@infradead.org>  2012-02-11 00:05:00 -0500
committer Ingo Molnar <mingo@kernel.org>         2014-02-10 10:17:10 -0500
sched/fair: Track cgroup depth
Track each sched entity's depth in the cgroup tree; this is useful for
things like find_matching_se(), where you need to walk two sched
entities up to a common parent.

Caching the depth avoids having to compute it on the spot, which saves
a number of potential cache misses.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1328936700.2476.17.camel@laptop
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/linux/sched.h |  1 +
-rw-r--r--  kernel/sched/fair.c   | 47 +++++++++++++++++++++--------------------------
2 files changed, 22 insertions(+), 26 deletions(-)
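To illustrate the idea from the message above: once every entity caches its
depth, finding a common ancestor of two entities reduces to equalizing the two
cached depths and then climbing in lockstep. A minimal stand-alone sketch,
using simplified stand-in types rather than the kernel's sched_entity (the
kernel compares cfs_rq's via is_same_group(); the sketch compares parent
pointers, which is the same idea):

	#include <assert.h>
	#include <stddef.h>

	struct entity {
		struct entity *parent;
		int depth;		/* cached: 0 for a root entity */
	};

	/* Walk both entities up until they hang off the same parent. */
	static void find_matching(struct entity **a, struct entity **b)
	{
		/* First equalize depths using the cached values... */
		while ((*a)->depth > (*b)->depth)
			*a = (*a)->parent;
		while ((*b)->depth > (*a)->depth)
			*b = (*b)->parent;

		/* ...then climb in lockstep until the parents match. */
		while ((*a)->parent != (*b)->parent) {
			*a = (*a)->parent;
			*b = (*b)->parent;
		}
	}

	int main(void)
	{
		struct entity root  = { NULL, 0 };
		struct entity g1    = { &root, 1 };
		struct entity g2    = { &root, 1 };
		struct entity task1 = { &g1, 2 };

		struct entity *a = &task1, *b = &g2;

		find_matching(&a, &b);
		assert(a == &g1 && b == &g2);	/* siblings under root */
		return 0;
	}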
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e3d556427b2e..555e27d717c0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1078,6 +1078,7 @@ struct sched_entity {
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	int			depth;
 	struct sched_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
 	struct cfs_rq		*cfs_rq;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 04fea7744a9f..748a7ac3388f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -322,13 +322,13 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
-static inline int
+static inline struct cfs_rq *
 is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
 	if (se->cfs_rq == pse->cfs_rq)
-		return 1;
+		return se->cfs_rq;
 
-	return 0;
+	return NULL;
 }
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
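An aside on the hunk above: is_same_group() now returns the shared cfs_rq
instead of 0/1, so a caller can test group membership and obtain the shared
runqueue in one call, since a pointer already serves as a truth value in C. A
tiny stand-alone analog with simplified types and a hypothetical caller (not
code from this patch):

	#include <stdio.h>

	struct cfs_rq { int nr_running; };
	struct sched_entity { struct cfs_rq *cfs_rq; };

	/* Same shape as the patched helper: the pointer doubles as the truth value. */
	static struct cfs_rq *
	is_same_group(struct sched_entity *se, struct sched_entity *pse)
	{
		if (se->cfs_rq == pse->cfs_rq)
			return se->cfs_rq;
		return NULL;
	}

	int main(void)
	{
		struct cfs_rq rq = { .nr_running = 2 };
		struct sched_entity a = { &rq }, b = { &rq };
		struct cfs_rq *shared = is_same_group(&a, &b);

		if (shared)	/* one call: both the test and the shared rq */
			printf("same group, nr_running=%d\n", shared->nr_running);
		return 0;
	}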
@@ -336,17 +336,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return se->parent;
 }
 
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-	int depth = 0;
-
-	for_each_sched_entity(se)
-		depth++;
-
-	return depth;
-}
-
 static void
 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
@@ -360,8 +349,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 	 */
 
 	/* First walk up until both entities are at same depth */
-	se_depth = depth_se(*se);
-	pse_depth = depth_se(*pse);
+	se_depth = (*se)->depth;
+	pse_depth = (*pse)->depth;
 
 	while (se_depth > pse_depth) {
 		se_depth--;
@@ -426,10 +415,10 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
-static inline int
+static inline struct cfs_rq *
 is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
-	return 1;
+	return cfs_rq_of(se); /* always the same rq */
 }
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
@@ -7262,7 +7251,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
+	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq;
+
 	/*
 	 * If the task was not on the rq at the time of this cgroup movement
 	 * it must have been asleep, sleeping tasks keep their ->vruntime
@@ -7288,23 +7279,24 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
 	 * To prevent boost or penalty in the new cfs_rq caused by delta
 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
 	 */
-	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
+	if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
 		on_rq = 1;
 
 	if (!on_rq)
-		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+		se->vruntime -= cfs_rq_of(se)->min_vruntime;
 	set_task_rq(p, task_cpu(p));
+	se->depth = se->parent ? se->parent->depth + 1 : 0;
 	if (!on_rq) {
-		cfs_rq = cfs_rq_of(&p->se);
-		p->se.vruntime += cfs_rq->min_vruntime;
+		cfs_rq = cfs_rq_of(se);
+		se->vruntime += cfs_rq->min_vruntime;
 #ifdef CONFIG_SMP
 		/*
 		 * migrate_task_rq_fair() will have removed our previous
 		 * contribution, but we must synchronize for ongoing future
 		 * decay.
 		 */
-		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
-		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
+		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
+		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
 #endif
 	}
 }
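The one line added above, se->depth = se->parent ? se->parent->depth + 1 : 0,
is what keeps the cached depth valid when set_task_rq() re-parents the entity
under its new cgroup. A minimal sketch of that invariant, again with
simplified stand-in types:

	#include <assert.h>
	#include <stddef.h>

	struct entity {
		struct entity *parent;
		int depth;
	};

	/* Re-parent e and refresh its cached depth, as the hunk above does. */
	static void move_entity(struct entity *e, struct entity *new_parent)
	{
		e->parent = new_parent;
		e->depth = e->parent ? e->parent->depth + 1 : 0;
	}

	int main(void)
	{
		struct entity root = { NULL, 0 };
		struct entity grp  = { &root, 1 };
		struct entity task = { &grp, 2 };

		move_entity(&task, &root);	/* move one level up */
		assert(task.depth == 1);

		move_entity(&task, NULL);	/* becomes a root entity */
		assert(task.depth == 0);
		return 0;
	}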
@@ -7400,10 +7392,13 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	if (!se)
 		return;
 
-	if (!parent)
+	if (!parent) {
 		se->cfs_rq = &rq->cfs;
-	else
+		se->depth = 0;
+	} else {
 		se->cfs_rq = parent->my_q;
+		se->depth = parent->depth + 1;
+	}
 
 	se->my_q = cfs_rq;
 	/* guarantee group entities always have weight */
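One subtlety worth noting: the removed depth_se() counted the entity itself
(for_each_sched_entity() starts at se, so a root came out as 1), while the new
field is zero-based (a root entity gets depth 0 in init_tg_cfs_entry() above).
Since find_matching_se() only compares depth differences, the change of base
is harmless. A zero-based cross-check of the cached field against an
on-the-spot walk, with the same simplified types:

	#include <assert.h>
	#include <stddef.h>

	struct entity {
		struct entity *parent;
		int depth;
	};

	/* On-the-spot walk, zero-based to match the new cached field. */
	static int walk_depth(const struct entity *e)
	{
		int depth = 0;

		while ((e = e->parent))
			depth++;
		return depth;
	}

	int main(void)
	{
		struct entity root = { NULL, 0 };
		struct entity grp  = { &root, root.depth + 1 };
		struct entity task = { &grp, grp.depth + 1 };

		assert(walk_depth(&root) == root.depth);
		assert(walk_depth(&grp)  == grp.depth);
		assert(walk_depth(&task) == task.depth);
		return 0;
	}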