diff options
author | Vincent Guittot <vincent.guittot@linaro.org> | 2016-11-08 04:53:44 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2016-11-16 04:29:09 -0500 |
commit | d31b1a66cbe0931733583ad9d9e8c6cfd710907d (patch) | |
tree | 44b05ab2336d8b074632d3f61dca8c2b1c720e6f | |
parent | 9c2791f936ef5fd04a118b5c284f2c9a95f4a647 (diff) |
sched/fair: Factorize PELT update
Every time we modify load/utilization of sched_entity, we start to
sync it with its cfs_rq. This update is done in different ways:
- when attaching/detaching a sched_entity, we update cfs_rq and then
we sync the entity with the cfs_rq.
- when enqueueing/dequeuing the sched_entity, we update both
sched_entity and cfs_rq metrics to now.
Use update_load_avg() every time we have to update and sync cfs_rq and
sched_entity before changing the state of a sched_entity.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: kernellwp@gmail.com
Cc: pjt@google.com
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1478598827-32372-4-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | kernel/sched/fair.c | 76 |
1 file changed, 25 insertions, 51 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4a67026a2424..d707ad037b31 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -3092,8 +3092,14 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) | |||
3092 | return decayed || removed_load; | 3092 | return decayed || removed_load; |
3093 | } | 3093 | } |
3094 | 3094 | ||
3095 | /* | ||
3096 | * Optional action to be done while updating the load average | ||
3097 | */ | ||
3098 | #define UPDATE_TG 0x1 | ||
3099 | #define SKIP_AGE_LOAD 0x2 | ||
3100 | |||
3095 | /* Update task and its cfs_rq load average */ | 3101 | /* Update task and its cfs_rq load average */ |
3096 | static inline void update_load_avg(struct sched_entity *se, int update_tg) | 3102 | static inline void update_load_avg(struct sched_entity *se, int flags) |
3097 | { | 3103 | { |
3098 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 3104 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
3099 | u64 now = cfs_rq_clock_task(cfs_rq); | 3105 | u64 now = cfs_rq_clock_task(cfs_rq); |
@@ -3104,11 +3110,13 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg) | |||
3104 | * Track task load average for carrying it to new CPU after migrated, and | 3110 | * Track task load average for carrying it to new CPU after migrated, and |
3105 | * track group sched_entity load average for task_h_load calc in migration | 3111 | * track group sched_entity load average for task_h_load calc in migration |
3106 | */ | 3112 | */ |
3107 | __update_load_avg(now, cpu, &se->avg, | 3113 | if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) { |
3114 | __update_load_avg(now, cpu, &se->avg, | ||
3108 | se->on_rq * scale_load_down(se->load.weight), | 3115 | se->on_rq * scale_load_down(se->load.weight), |
3109 | cfs_rq->curr == se, NULL); | 3116 | cfs_rq->curr == se, NULL); |
3117 | } | ||
3110 | 3118 | ||
3111 | if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg) | 3119 | if (update_cfs_rq_load_avg(now, cfs_rq, true) && (flags & UPDATE_TG)) |
3112 | update_tg_load_avg(cfs_rq, 0); | 3120 | update_tg_load_avg(cfs_rq, 0); |
3113 | } | 3121 | } |
3114 | 3122 | ||
@@ -3122,26 +3130,6 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg) | |||
3122 | */ | 3130 | */ |
3123 | static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) | 3131 | static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) |
3124 | { | 3132 | { |
3125 | if (!sched_feat(ATTACH_AGE_LOAD)) | ||
3126 | goto skip_aging; | ||
3127 | |||
3128 | /* | ||
3129 | * If we got migrated (either between CPUs or between cgroups) we'll | ||
3130 | * have aged the average right before clearing @last_update_time. | ||
3131 | * | ||
3132 | * Or we're fresh through post_init_entity_util_avg(). | ||
3133 | */ | ||
3134 | if (se->avg.last_update_time) { | ||
3135 | __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)), | ||
3136 | &se->avg, 0, 0, NULL); | ||
3137 | |||
3138 | /* | ||
3139 | * XXX: we could have just aged the entire load away if we've been | ||
3140 | * absent from the fair class for too long. | ||
3141 | */ | ||
3142 | } | ||
3143 | |||
3144 | skip_aging: | ||
3145 | se->avg.last_update_time = cfs_rq->avg.last_update_time; | 3133 | se->avg.last_update_time = cfs_rq->avg.last_update_time; |
3146 | cfs_rq->avg.load_avg += se->avg.load_avg; | 3134 | cfs_rq->avg.load_avg += se->avg.load_avg; |
3147 | cfs_rq->avg.load_sum += se->avg.load_sum; | 3135 | cfs_rq->avg.load_sum += se->avg.load_sum; |
@@ -3161,9 +3149,6 @@ skip_aging: | |||
3161 | */ | 3149 | */ |
3162 | static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) | 3150 | static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) |
3163 | { | 3151 | { |
3164 | __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)), | ||
3165 | &se->avg, se->on_rq * scale_load_down(se->load.weight), | ||
3166 | cfs_rq->curr == se, NULL); | ||
3167 | 3152 | ||
3168 | sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); | 3153 | sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); |
3169 | sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum); | 3154 | sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum); |
@@ -3178,34 +3163,20 @@ static inline void | |||
3178 | enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) | 3163 | enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) |
3179 | { | 3164 | { |
3180 | struct sched_avg *sa = &se->avg; | 3165 | struct sched_avg *sa = &se->avg; |
3181 | u64 now = cfs_rq_clock_task(cfs_rq); | ||
3182 | int migrated, decayed; | ||
3183 | |||
3184 | migrated = !sa->last_update_time; | ||
3185 | if (!migrated) { | ||
3186 | __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa, | ||
3187 | se->on_rq * scale_load_down(se->load.weight), | ||
3188 | cfs_rq->curr == se, NULL); | ||
3189 | } | ||
3190 | |||
3191 | decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated); | ||
3192 | 3166 | ||
3193 | cfs_rq->runnable_load_avg += sa->load_avg; | 3167 | cfs_rq->runnable_load_avg += sa->load_avg; |
3194 | cfs_rq->runnable_load_sum += sa->load_sum; | 3168 | cfs_rq->runnable_load_sum += sa->load_sum; |
3195 | 3169 | ||
3196 | if (migrated) | 3170 | if (!sa->last_update_time) { |
3197 | attach_entity_load_avg(cfs_rq, se); | 3171 | attach_entity_load_avg(cfs_rq, se); |
3198 | |||
3199 | if (decayed || migrated) | ||
3200 | update_tg_load_avg(cfs_rq, 0); | 3172 | update_tg_load_avg(cfs_rq, 0); |
3173 | } | ||
3201 | } | 3174 | } |
3202 | 3175 | ||
3203 | /* Remove the runnable load generated by se from cfs_rq's runnable load average */ | 3176 | /* Remove the runnable load generated by se from cfs_rq's runnable load average */ |
3204 | static inline void | 3177 | static inline void |
3205 | dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) | 3178 | dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) |
3206 | { | 3179 | { |
3207 | update_load_avg(se, 1); | ||
3208 | |||
3209 | cfs_rq->runnable_load_avg = | 3180 | cfs_rq->runnable_load_avg = |
3210 | max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0); | 3181 | max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0); |
3211 | cfs_rq->runnable_load_sum = | 3182 | cfs_rq->runnable_load_sum = |
@@ -3289,7 +3260,10 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) | |||
3289 | return 0; | 3260 | return 0; |
3290 | } | 3261 | } |
3291 | 3262 | ||
3292 | static inline void update_load_avg(struct sched_entity *se, int not_used) | 3263 | #define UPDATE_TG 0x0 |
3264 | #define SKIP_AGE_LOAD 0x0 | ||
3265 | |||
3266 | static inline void update_load_avg(struct sched_entity *se, int not_used1) | ||
3293 | { | 3267 | { |
3294 | cpufreq_update_util(rq_of(cfs_rq_of(se)), 0); | 3268 | cpufreq_update_util(rq_of(cfs_rq_of(se)), 0); |
3295 | } | 3269 | } |
@@ -3434,6 +3408,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) | |||
3434 | if (renorm && !curr) | 3408 | if (renorm && !curr) |
3435 | se->vruntime += cfs_rq->min_vruntime; | 3409 | se->vruntime += cfs_rq->min_vruntime; |
3436 | 3410 | ||
3411 | update_load_avg(se, UPDATE_TG); | ||
3437 | enqueue_entity_load_avg(cfs_rq, se); | 3412 | enqueue_entity_load_avg(cfs_rq, se); |
3438 | account_entity_enqueue(cfs_rq, se); | 3413 | account_entity_enqueue(cfs_rq, se); |
3439 | update_cfs_shares(cfs_rq); | 3414 | update_cfs_shares(cfs_rq); |
@@ -3508,6 +3483,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) | |||
3508 | * Update run-time statistics of the 'current'. | 3483 | * Update run-time statistics of the 'current'. |
3509 | */ | 3484 | */ |
3510 | update_curr(cfs_rq); | 3485 | update_curr(cfs_rq); |
3486 | update_load_avg(se, UPDATE_TG); | ||
3511 | dequeue_entity_load_avg(cfs_rq, se); | 3487 | dequeue_entity_load_avg(cfs_rq, se); |
3512 | 3488 | ||
3513 | update_stats_dequeue(cfs_rq, se, flags); | 3489 | update_stats_dequeue(cfs_rq, se, flags); |
@@ -3595,7 +3571,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
3595 | */ | 3571 | */ |
3596 | update_stats_wait_end(cfs_rq, se); | 3572 | update_stats_wait_end(cfs_rq, se); |
3597 | __dequeue_entity(cfs_rq, se); | 3573 | __dequeue_entity(cfs_rq, se); |
3598 | update_load_avg(se, 1); | 3574 | update_load_avg(se, UPDATE_TG); |
3599 | } | 3575 | } |
3600 | 3576 | ||
3601 | update_stats_curr_start(cfs_rq, se); | 3577 | update_stats_curr_start(cfs_rq, se); |
@@ -3713,7 +3689,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | |||
3713 | /* | 3689 | /* |
3714 | * Ensure that runnable average is periodically updated. | 3690 | * Ensure that runnable average is periodically updated. |
3715 | */ | 3691 | */ |
3716 | update_load_avg(curr, 1); | 3692 | update_load_avg(curr, UPDATE_TG); |
3717 | update_cfs_shares(cfs_rq); | 3693 | update_cfs_shares(cfs_rq); |
3718 | 3694 | ||
3719 | #ifdef CONFIG_SCHED_HRTICK | 3695 | #ifdef CONFIG_SCHED_HRTICK |
@@ -4610,7 +4586,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) | |||
4610 | if (cfs_rq_throttled(cfs_rq)) | 4586 | if (cfs_rq_throttled(cfs_rq)) |
4611 | break; | 4587 | break; |
4612 | 4588 | ||
4613 | update_load_avg(se, 1); | 4589 | update_load_avg(se, UPDATE_TG); |
4614 | update_cfs_shares(cfs_rq); | 4590 | update_cfs_shares(cfs_rq); |
4615 | } | 4591 | } |
4616 | 4592 | ||
@@ -4669,7 +4645,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) | |||
4669 | if (cfs_rq_throttled(cfs_rq)) | 4645 | if (cfs_rq_throttled(cfs_rq)) |
4670 | break; | 4646 | break; |
4671 | 4647 | ||
4672 | update_load_avg(se, 1); | 4648 | update_load_avg(se, UPDATE_TG); |
4673 | update_cfs_shares(cfs_rq); | 4649 | update_cfs_shares(cfs_rq); |
4674 | } | 4650 | } |
4675 | 4651 | ||
@@ -8821,10 +8797,9 @@ static inline bool vruntime_normalized(struct task_struct *p) | |||
8821 | static void detach_entity_cfs_rq(struct sched_entity *se) | 8797 | static void detach_entity_cfs_rq(struct sched_entity *se) |
8822 | { | 8798 | { |
8823 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 8799 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
8824 | u64 now = cfs_rq_clock_task(cfs_rq); | ||
8825 | 8800 | ||
8826 | /* Catch up with the cfs_rq and remove our load when we leave */ | 8801 | /* Catch up with the cfs_rq and remove our load when we leave */ |
8827 | update_cfs_rq_load_avg(now, cfs_rq, false); | 8802 | update_load_avg(se, 0); |
8828 | detach_entity_load_avg(cfs_rq, se); | 8803 | detach_entity_load_avg(cfs_rq, se); |
8829 | update_tg_load_avg(cfs_rq, false); | 8804 | update_tg_load_avg(cfs_rq, false); |
8830 | } | 8805 | } |
@@ -8832,7 +8807,6 @@ static void detach_entity_cfs_rq(struct sched_entity *se) | |||
8832 | static void attach_entity_cfs_rq(struct sched_entity *se) | 8807 | static void attach_entity_cfs_rq(struct sched_entity *se) |
8833 | { | 8808 | { |
8834 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 8809 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
8835 | u64 now = cfs_rq_clock_task(cfs_rq); | ||
8836 | 8810 | ||
8837 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8811 | #ifdef CONFIG_FAIR_GROUP_SCHED |
8838 | /* | 8812 | /* |
@@ -8843,7 +8817,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se) | |||
8843 | #endif | 8817 | #endif |
8844 | 8818 | ||
8845 | /* Synchronize entity with its cfs_rq */ | 8819 | /* Synchronize entity with its cfs_rq */ |
8846 | update_cfs_rq_load_avg(now, cfs_rq, false); | 8820 | update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); |
8847 | attach_entity_load_avg(cfs_rq, se); | 8821 | attach_entity_load_avg(cfs_rq, se); |
8848 | update_tg_load_avg(cfs_rq, false); | 8822 | update_tg_load_avg(cfs_rq, false); |
8849 | } | 8823 | } |