author     Peter Zijlstra <peterz@infradead.org>    2017-05-06 11:32:43 -0400
committer  Ingo Molnar <mingo@kernel.org>           2017-09-29 13:35:12 -0400
commit     88c0616ee729067ecb412bed76ef4a8734ea5100 (patch)
tree       9271e6c2ea27982118e85130b7d45df899f9e315 /kernel/sched/fair.c
parent     c7b50216818ef3dca14a52e3499750fbad2d9691 (diff)
sched/fair: Change update_load_avg() arguments
Most call sites of update_load_avg() already have cfs_rq_of(se) available; pass it down instead of recomputing it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
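The signature change is easiest to see in isolation. Below is a condensed before/after sketch assembled from the hunks further down; the PELT update body is elided, so it is illustrative rather than compilable:

    /* Before this patch: the helper rederived the runqueue on every call. */
    static inline void update_load_avg(struct sched_entity *se, int flags)
    {
            struct cfs_rq *cfs_rq = cfs_rq_of(se);  /* recomputed here */
            /* ... PELT update against cfs_rq ... */
    }

    /* After this patch: the caller, which typically already holds the
     * cfs_rq pointer, passes it down explicitly. */
    static inline void update_load_avg(struct cfs_rq *cfs_rq,
                                       struct sched_entity *se, int flags)
    {
            /* ... PELT update against the caller-supplied cfs_rq ... */
    }

Call sites change accordingly: update_load_avg(se, UPDATE_TG) becomes update_load_avg(cfs_rq, se, UPDATE_TG) wherever the caller already has cfs_rq in hand, and update_load_avg(cfs_rq_of(se), se, ...) in the two places (update_blocked_averages() and sched_group_set_shares()) that do not.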
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3b5b82345774..a6c53580fea0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3480,9 +3480,8 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #define SKIP_AGE_LOAD	0x2
 
 /* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int flags)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);
 	struct rq *rq = rq_of(cfs_rq);
 	int cpu = cpu_of(rq);
@@ -3643,9 +3642,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #define UPDATE_TG	0x0
 #define SKIP_AGE_LOAD	0x0
 
-static inline void update_load_avg(struct sched_entity *se, int not_used1)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-	cfs_rq_util_change(cfs_rq_of(se));
+	cfs_rq_util_change(cfs_rq);
 }
 
 static inline void
@@ -3796,7 +3795,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	 *     its group cfs_rq
	 *   - Add its new weight to cfs_rq->load.weight
	 */
-	update_load_avg(se, UPDATE_TG);
+	update_load_avg(cfs_rq, se, UPDATE_TG);
 	enqueue_entity_load_avg(cfs_rq, se);
 	update_cfs_shares(se);
 	account_entity_enqueue(cfs_rq, se);
@@ -3880,7 +3879,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	 *   - For group entity, update its weight to reflect the new share
	 *     of its group cfs_rq.
	 */
-	update_load_avg(se, UPDATE_TG);
+	update_load_avg(cfs_rq, se, UPDATE_TG);
 	dequeue_entity_load_avg(cfs_rq, se);
 
 	update_stats_dequeue(cfs_rq, se, flags);
@@ -3968,7 +3967,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
		 */
 		update_stats_wait_end(cfs_rq, se);
 		__dequeue_entity(cfs_rq, se);
-		update_load_avg(se, UPDATE_TG);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
@@ -4070,7 +4069,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 		/* Put 'current' back into the tree. */
 		__enqueue_entity(cfs_rq, prev);
 		/* in !on_rq case, update occurred at dequeue */
-		update_load_avg(prev, 0);
+		update_load_avg(cfs_rq, prev, 0);
 	}
 	cfs_rq->curr = NULL;
 }
@@ -4086,7 +4085,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	/*
 	 * Ensure that runnable average is periodically updated.
 	 */
-	update_load_avg(curr, UPDATE_TG);
+	update_load_avg(cfs_rq, curr, UPDATE_TG);
 	update_cfs_shares(curr);
 
 #ifdef CONFIG_SCHED_HRTICK
@@ -5004,7 +5003,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_load_avg(se, UPDATE_TG);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
 		update_cfs_shares(se);
 	}
 
@@ -5063,7 +5062,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_load_avg(se, UPDATE_TG);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
 		update_cfs_shares(se);
 	}
 
@@ -7121,7 +7120,7 @@ static void update_blocked_averages(int cpu)
 		/* Propagate pending load changes to the parent, if any: */
 		se = cfs_rq->tg->se[cpu];
 		if (se && !skip_blocked_update(se))
-			update_load_avg(se, 0);
+			update_load_avg(cfs_rq_of(se), se, 0);
 
 		/*
 		 * There can be a lot of idle CPU cgroups. Don't let fully
@@ -9295,7 +9294,7 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_load_avg(se, UPDATE_TG);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
 	}
 }
 #else
@@ -9307,7 +9306,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 	/* Catch up with the cfs_rq and remove our load when we leave */
-	update_load_avg(se, 0);
+	update_load_avg(cfs_rq, se, 0);
 	detach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
@@ -9326,7 +9325,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 #endif
 
 	/* Synchronize entity with its cfs_rq */
-	update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
 	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
@@ -9610,7 +9609,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		rq_lock_irqsave(rq, &rf);
 		update_rq_clock(rq);
 		for_each_sched_entity(se) {
-			update_load_avg(se, UPDATE_TG);
+			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 			update_cfs_shares(se);
 		}
 		rq_unlock_irqrestore(rq, &rf);
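As background on what the removed recomputation costs: cfs_rq_of() is a small helper. With CONFIG_FAIR_GROUP_SCHED it is a single field load from the entity; without group scheduling it derives the runqueue from the owning task. The sketch below is paraphrased for illustration from fair.c of roughly this vintage and is not part of this patch:

    #ifdef CONFIG_FAIR_GROUP_SCHED
    /* Group scheduling: the entity caches the cfs_rq it is queued on. */
    static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
    {
            return se->cfs_rq;
    }
    #else
    /* No group scheduling: every entity lives on its CPU's root cfs_rq. */
    static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
    {
            return &task_rq(task_of(se))->cfs;
    }
    #endif

The per-call saving is therefore modest; the main point, as the changelog says, is that most call sites already hold the cfs_rq, so passing it down removes a redundant lookup and makes explicit which runqueue each update operates on.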