Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- | kernel/sched/fair.c | 9
1 file changed, 4 insertions, 5 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40667cbf371b..7ce18f3c097a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -676,7 +676,6 @@ void init_task_runnable_average(struct task_struct *p)
 {
 	u32 slice;
 
-	p->se.avg.decay_count = 0;
 	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
 	p->se.avg.runnable_avg_sum = slice;
 	p->se.avg.runnable_avg_period = slice;
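For reference, init_task_runnable_average() after this hunk simply seeds the load-tracking sums from the task's initial slice. A sketch of the resulting body, reconstructed from the context lines above (anything below the last line shown is elided, since the hunk does not confirm it):

void init_task_runnable_average(struct task_struct *p)
{
	u32 slice;

	/* seed sum and period from the initial slice; the >> 10 shift
	 * converts the nanosecond slice to the ~1us units the per-entity
	 * load tracking accumulates in */
	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
	p->se.avg.runnable_avg_sum = slice;
	p->se.avg.runnable_avg_period = slice;
	/* the rest of the function lies outside the hunk and is not shown */
}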
@@ -1730,7 +1729,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
 	nodes = node_online_map;
 	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
 		unsigned long max_faults = 0;
-		nodemask_t max_group;
+		nodemask_t max_group = NODE_MASK_NONE;
 		int a, b;
 
 		/* Are there nodes at this distance from each other? */
@@ -2574,11 +2573,11 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
 	u64 decays = atomic64_read(&cfs_rq->decay_counter);
 
 	decays -= se->avg.decay_count;
+	se->avg.decay_count = 0;
 	if (!decays)
 		return 0;
 
 	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
-	se->avg.decay_count = 0;
 
 	return decays;
 }
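Taken together, the moved line means se->avg.decay_count is now cleared even on the early-return path where no whole decay period has elapsed. A sketch of __synchronize_entity_decay() as it reads after the hunk, reconstructed from the lines shown (the cfs_rq lookup at the top sits above the hunk and is an assumption):

static inline u64 __synchronize_entity_decay(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);	/* assumed; not part of the hunk */
	u64 decays = atomic64_read(&cfs_rq->decay_counter);

	decays -= se->avg.decay_count;
	se->avg.decay_count = 0;	/* now cleared before the early return */
	if (!decays)
		return 0;

	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);

	return decays;
}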
@@ -5157,7 +5156,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq->skip_clock_update = 1;
+		rq_clock_skip_update(rq, true);
 	}
 
 	set_skip_buddy(se);
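The + line trades a raw write of rq->skip_clock_update for the rq_clock_skip_update() accessor. Only the call site is confirmed by this diff; the helper below is a sketch of roughly what such an accessor does, with the flag and field names (RQCF_REQ_SKIP, rq->clock_skip_update) assumed rather than taken from this patch:

/* Sketch of the accessor pattern: request that the upcoming schedule()
 * skip its rq clock update, under the rq lock, instead of setting a
 * bare flag.  Names below are assumptions, not shown in this diff. */
static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}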
@@ -5949,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
 	 */
 	age_stamp = ACCESS_ONCE(rq->age_stamp);
 	avg = ACCESS_ONCE(rq->rt_avg);
+	delta = __rq_clock_broken(rq) - age_stamp;
 
-	delta = rq_clock(rq) - age_stamp;
 	if (unlikely(delta < 0))
 		delta = 0;
 
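Here the + line computes delta from __rq_clock_broken() instead of rq_clock(), in line with the lockless ACCESS_ONCE() reads just above it. The diff confirms only the call; the helper below is a sketch of the kind of raw clock read that name suggests, bypassing the serialization checks rq_clock() performs:

/* Sketch: read rq->clock directly, without the lockdep/clock-update
 * assertions of rq_clock(), for callers that tolerate a stale value.
 * Implementation assumed, not taken from this diff. */
static inline u64 __rq_clock_broken(struct rq *rq)
{
	return ACCESS_ONCE(rq->clock);
}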