author     Byungchul Park <byungchul.park@lge.com>    2015-10-23 12:16:19 -0400
committer  Ingo Molnar <mingo@kernel.org>             2015-12-04 04:34:42 -0500
commit     ad936d8658fd348338cb7d42c577dac77892b074 (patch)
tree       0ba206f9c8515b4ed01b132d885f9bf07715b937 /kernel/sched/fair.c
parent     467386fbbf085e716570813a5d3be3927c348e11 (diff)
sched/fair: Make it possible to account fair load avg consistently
The current code accounts for the time a task was absent from the fair
class (per ATTACH_AGE_LOAD). However, it does not work correctly when a
task is migrated or moved to another cgroup while it is outside of the
fair class.
This patch addresses that by aging the load on migration: we locklessly
read the 'last_update_time' stamp from both the old and the new cfs_rq,
age the load up to the old time, and then set it to the new time.
These timestamps should in general not be more than one tick apart, so
there is a definite bound on the error this introduces.
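On 32-bit kernels a 64-bit 'last_update_time' cannot be read atomically,
so the read loop added below pairs with a write barrier on the update
side: the real stamp is written first, then smp_wmb(), then the shadow
copy, so a reader that sees copy == stamp knows it observed a consistent
value. As a rough, trimmed sketch of that writer side, modeled on
update_cfs_rq_load_avg() in the same file (not part of this patch;
details elided):

static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	struct sched_avg *sa = &cfs_rq->avg;
	int decayed;

	/* ... fold in removed load, then decay the averages up to 'now' ... */
	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);

#ifndef CONFIG_64BIT
	/*
	 * Pairs with the smp_rmb() in set_task_rq_fair(): publish the
	 * shadow copy only after the real stamp has been written.
	 */
	smp_wmb();
	cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif

	return decayed;
}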
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
[ Changelog, a few edits and !SMP build fix ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1445616981-29904-2-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  46
1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ff8ec8695957..efd664c4926e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2715,6 +2715,52 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 	}
 }
 
+/*
+ * Called within set_task_rq() right before setting a task's cpu. The
+ * caller only guarantees p->pi_lock is held; no other assumptions,
+ * including the state of rq->lock, should be made.
+ */
+void set_task_rq_fair(struct sched_entity *se,
+		      struct cfs_rq *prev, struct cfs_rq *next)
+{
+	if (!sched_feat(ATTACH_AGE_LOAD))
+		return;
+
+	/*
+	 * We are supposed to update the task to "current" time, so that it is
+	 * up to date and ready to go to the new CPU/cfs_rq. But we have
+	 * difficulty getting what the current time is, so simply throw away
+	 * the out-of-date time. This results in the wakee task being less
+	 * decayed, but giving the wakee more load does not sound bad.
+	 */
+	if (se->avg.last_update_time && prev) {
+		u64 p_last_update_time;
+		u64 n_last_update_time;
+
+#ifndef CONFIG_64BIT
+		u64 p_last_update_time_copy;
+		u64 n_last_update_time_copy;
+
+		do {
+			p_last_update_time_copy = prev->load_last_update_time_copy;
+			n_last_update_time_copy = next->load_last_update_time_copy;
+
+			smp_rmb();
+
+			p_last_update_time = prev->avg.last_update_time;
+			n_last_update_time = next->avg.last_update_time;
+
+		} while (p_last_update_time != p_last_update_time_copy ||
+			 n_last_update_time != n_last_update_time_copy);
+#else
+		p_last_update_time = prev->avg.last_update_time;
+		n_last_update_time = next->avg.last_update_time;
+#endif
+		__update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
+				  &se->avg, 0, 0, NULL);
+		se->avg.last_update_time = n_last_update_time;
+	}
+}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
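Since this view is limited to kernel/sched/fair.c, the companion hunks
in kernel/sched/sched.h are not shown: the declaration (with the !SMP
stub mentioned in the changelog annotation above) and the call site in
set_task_rq(), which runs right before the task's cfs_rq pointer is
switched. A sketch of how they fit together (abridged; consult the full
commit for the exact hunks):

/* kernel/sched/sched.h (sketch) */
#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif

static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct task_group *tg = task_group(p);

	/* Age the load against the old cfs_rq before the pointers change. */
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif
	/* ... rt_rq wiring elided ... */
}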