path: root/kernel/sched_fair.c
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  21
1 file changed, 2 insertions(+), 19 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2e43d4a748c3..f2aa987027d6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -726,21 +726,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	__enqueue_entity(cfs_rq, se);
 }
 
-static void update_avg(u64 *avg, u64 sample)
-{
-	s64 diff = sample - *avg;
-	*avg += diff >> 3;
-}
-
-static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	if (!se->last_wakeup)
-		return;
-
-	update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
-	se->last_wakeup = 0;
-}
-
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -751,7 +736,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		update_avg_stats(cfs_rq, se);
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -1196,9 +1180,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && balanced && curr->sched_class == &fair_sched_class) {
+	if (sync && balanced) {
 		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
 				p->se.avg_overlap < sysctl_sched_migration_cost)
 			return 1;
 	}
 
@@ -1359,7 +1343,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		return;
 	}
 
-	se->last_wakeup = se->sum_exec_runtime;
 	if (unlikely(se == pse))
 		return;
 
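For reference, the removed update_avg() helper maintained an exponential moving average of the wakeup overlap: each new sample pulls the average toward it by 1/8 of the difference (the "diff >> 3"). Below is a minimal standalone sketch of that behavior; the driver loop, the sample values, and main() are illustrative only and not part of the kernel code above.

#include <stdio.h>
#include <stdint.h>

/*
 * Same arithmetic as the removed kernel helper: move avg toward
 * sample by one eighth of the difference, i.e. an exponential
 * moving average with weight 1/8. The right shift of a negative
 * value relies on arithmetic shift, as the kernel code does.
 */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;
}

int main(void)
{
	/* Hypothetical overlap samples in arbitrary time units. */
	uint64_t samples[] = { 800, 800, 800, 100, 100 };
	uint64_t avg_overlap = 0;

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_avg(&avg_overlap, samples[i]);
		printf("sample=%llu avg=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)avg_overlap);
	}
	return 0;
}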