-rw-r--r--   kernel/sched.c        14
-rw-r--r--   kernel/sched_fair.c   21
2 files changed, 16 insertions, 19 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d99aeabeb72f..bbc40c3a0657 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1693,6 +1693,12 @@ static void set_load_weight(struct task_struct *p)
 	p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
 }
 
+static void update_avg(u64 *avg, u64 sample)
+{
+	s64 diff = sample - *avg;
+	*avg += diff >> 3;
+}
+
 static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	sched_info_queued(p);
@@ -1702,6 +1708,12 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
+	if (sleep && p->se.last_wakeup) {
+		update_avg(&p->se.avg_overlap,
+			   p->se.sum_exec_runtime - p->se.last_wakeup);
+		p->se.last_wakeup = 0;
+	}
+
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
 }
@@ -2313,6 +2325,8 @@ out_running:
 		p->sched_class->task_wake_up(rq, p);
 #endif
 out:
+	current->se.last_wakeup = current->se.sum_exec_runtime;
+
 	task_rq_unlock(rq, &flags);
 
 	return success;
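
Aside (not part of the patch): update_avg() keeps a running estimate as an exponential moving average with weight 1/8; each new sample pulls the average one eighth of the way toward itself, since avg += (sample - avg) >> 3, and the signed diff lets the estimate decay as well as grow. A minimal userspace sketch of that arithmetic, with made-up sample values:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the kernel's update_avg() above: the signed
 * diff lets the estimate fall as well as rise, and ">> 3" gives
 * an exponential moving average with weight 1/8. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;
	*avg += diff >> 3;
}

int main(void)
{
	uint64_t avg = 0;
	int i;

	/* Feed a constant 1000ns overlap: the estimate closes about
	 * 1/8 of the remaining gap per sample (125, 234, 329, ...). */
	for (i = 0; i < 10; i++) {
		update_avg(&avg, 1000);
		printf("sample %2d: avg = %llu\n", i,
		       (unsigned long long)avg);
	}
	return 0;
}
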
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2e43d4a748c3..f2aa987027d6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -726,21 +726,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	__enqueue_entity(cfs_rq, se);
 }
 
-static void update_avg(u64 *avg, u64 sample)
-{
-	s64 diff = sample - *avg;
-	*avg += diff >> 3;
-}
-
-static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	if (!se->last_wakeup)
-		return;
-
-	update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
-	se->last_wakeup = 0;
-}
-
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -751,7 +736,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		update_avg_stats(cfs_rq, se);
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -1196,9 +1180,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && balanced && curr->sched_class == &fair_sched_class) {
+	if (sync && balanced) {
 		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
 		    p->se.avg_overlap < sysctl_sched_migration_cost)
 			return 1;
 	}
 
@@ -1359,7 +1343,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		return;
 	}
 
-	se->last_wakeup = se->sum_exec_runtime;
 	if (unlikely(se == pse))
 		return;
 
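
Aside (not part of the patch): with update_avg() and the last_wakeup bookkeeping moved into the core enqueue/dequeue paths, avg_overlap is now maintained for every scheduling class, so wake_affine() no longer needs the curr->sched_class == &fair_sched_class guard. The heuristic itself is unchanged: on a synchronous wakeup in a balanced domain, pull the woken task onto the waker's CPU only when both tasks' average overlap is below sysctl_sched_migration_cost, i.e. they behave like a tightly coupled producer/consumer pair. A standalone sketch of that decision; the 0.5ms default and the sample values are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel tunable; 500000ns (0.5ms) is assumed
 * here as the default of this era. */
static const uint64_t sysctl_sched_migration_cost = 500000;

/* Sketch of the post-patch wake_affine() overlap test: affine the
 * wakeup only when it is synchronous, the domain is balanced, and
 * both tasks historically ran only briefly after waking the other. */
static int overlap_says_affine(int sync, int balanced,
			       uint64_t curr_avg_overlap,
			       uint64_t p_avg_overlap)
{
	if (sync && balanced &&
	    curr_avg_overlap < sysctl_sched_migration_cost &&
	    p_avg_overlap < sysctl_sched_migration_cost)
		return 1;
	return 0;
}

int main(void)
{
	/* A pipe-like pair: each sleeps ~50us after waking the other. */
	printf("coupled pair -> %d\n",
	       overlap_says_affine(1, 1, 50000, 50000));
	/* A CPU hog that keeps running ~5ms after each wakeup. */
	printf("hog + waker  -> %d\n",
	       overlap_says_affine(1, 1, 5000000, 50000));
	return 0;
}
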
