 kernel/sched/fair.c | 39
 1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2805bd7c8994..03adf9fb48b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3173,6 +3173,36 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
 	return 1;
 }
 
+/*
+ * Check if we need to update the load and the utilization of a blocked
+ * group_entity:
+ */
+static inline bool skip_blocked_update(struct sched_entity *se)
+{
+	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+
+	/*
+	 * If the sched_entity still has non-zero load or utilization, we have
+	 * to decay it:
+	 */
+	if (se->avg.load_avg || se->avg.util_avg)
+		return false;
+
+	/*
+	 * If there is a pending propagation, we have to update the load and
+	 * the utilization of the sched_entity:
+	 */
+	if (gcfs_rq->propagate_avg)
+		return false;
+
+	/*
+	 * Otherwise, the load and the utilization of the sched_entity are
+	 * already zero and there is no pending propagation, so it would be a
+	 * waste of time to try to decay them:
+	 */
+	return true;
+}
+
 #else /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
@@ -6961,6 +6991,8 @@ static void update_blocked_averages(int cpu)
 	 * list_add_leaf_cfs_rq() for details.
 	 */
 	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct sched_entity *se;
+
 		/* throttled entities do not contribute to load */
 		if (throttled_hierarchy(cfs_rq))
 			continue;
@@ -6968,9 +7000,10 @@ static void update_blocked_averages(int cpu)
 		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
 			update_tg_load_avg(cfs_rq, 0);
 
-		/* Propagate pending load changes to the parent */
-		if (cfs_rq->tg->se[cpu])
-			update_load_avg(cfs_rq->tg->se[cpu], 0);
+		/* Propagate pending load changes to the parent, if any: */
+		se = cfs_rq->tg->se[cpu];
+		if (se && !skip_blocked_update(se))
+			update_load_avg(se, 0);
 	}
 	rq_unlock_irqrestore(rq, &rf);
 }
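
For illustration only, a small userspace sketch of the decision the new skip_blocked_update() helper makes in the loop above. The *_stub types below are made-up stand-ins for the kernel structures (they are not the real definitions); the sketch just prints which of the three cases (still loaded, pending propagation, fully idle) would allow the blocked-load update to be skipped.

/* Standalone sketch, not kernel code: stub types modelled on the fields the
 * patch touches. */
#include <stdbool.h>
#include <stdio.h>

struct sched_avg_stub {
	unsigned long load_avg;
	unsigned long util_avg;
};

struct cfs_rq_stub {
	unsigned long propagate_avg;	/* pending propagation flag */
};

struct sched_entity_stub {
	struct sched_avg_stub avg;
	struct cfs_rq_stub *my_q;	/* group cfs_rq owned by this entity */
};

/* Mirrors the helper's logic: true means the decay can safely be skipped. */
static bool skip_blocked_update_stub(const struct sched_entity_stub *se)
{
	const struct cfs_rq_stub *gcfs_rq = se->my_q;

	/* Non-zero load or utilization still has to be decayed. */
	if (se->avg.load_avg || se->avg.util_avg)
		return false;

	/* A pending propagation also forces an update. */
	if (gcfs_rq->propagate_avg)
		return false;

	/* Everything is already zero and nothing is pending: skip. */
	return true;
}

int main(void)
{
	struct cfs_rq_stub quiet = { .propagate_avg = 0 };
	struct cfs_rq_stub pending = { .propagate_avg = 1 };

	struct sched_entity_stub loaded = { .avg = { 512, 128 }, .my_q = &quiet };
	struct sched_entity_stub propagating = { .avg = { 0, 0 }, .my_q = &pending };
	struct sched_entity_stub idle = { .avg = { 0, 0 }, .my_q = &quiet };

	printf("loaded:      skip=%d\n", skip_blocked_update_stub(&loaded));      /* 0 */
	printf("propagating: skip=%d\n", skip_blocked_update_stub(&propagating)); /* 0 */
	printf("idle:        skip=%d\n", skip_blocked_update_stub(&idle));        /* 1 */
	return 0;
}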