Diffstat (limited to 'kernel/sched/fair.c')
 -rw-r--r--  kernel/sched/fair.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df2cdf77f899..7ce18f3c097a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -676,7 +676,6 @@ void init_task_runnable_average(struct task_struct *p)
 {
 	u32 slice;
 
-	p->se.avg.decay_count = 0;
 	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
 	p->se.avg.runnable_avg_sum = slice;
 	p->se.avg.runnable_avg_period = slice;
@@ -1730,7 +1729,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
 	nodes = node_online_map;
 	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
 		unsigned long max_faults = 0;
-		nodemask_t max_group;
+		nodemask_t max_group = NODE_MASK_NONE;
 		int a, b;
 
 		/* Are there nodes at this distance from each other? */
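
Note on the max_group hunk: NODE_MASK_NONE is the all-clear nodemask initializer, so the on-stack mask now starts empty instead of holding whatever was left on the stack. That matters whenever a local accumulator is only written on some paths before it is read. Below is a minimal user-space sketch of the same pitfall; the struct, macro, and helper are illustrative stand-ins, not the kernel's nodemask API, and the loop only loosely mirrors the scoring logic.

#include <stdio.h>

/* Toy stand-in for a fixed-size bitmask such as nodemask_t. */
struct mask { unsigned long bits[2]; };
#define MASK_NONE ((struct mask){ .bits = { 0, 0 } })	/* analogous to NODE_MASK_NONE */

static void mask_set(struct mask *m, unsigned int bit)
{
	m->bits[bit / (8 * sizeof(long))] |= 1UL << (bit % (8 * sizeof(long)));
}

int main(void)
{
	/* Without "= MASK_NONE" this holds stale stack contents, and the
	 * loop below could report bits it never set. */
	struct mask best = MASK_NONE;
	int faults[4] = { 0, 3, 0, 3 }, max_faults = 0;

	for (unsigned int i = 0; i < 4; i++) {
		if (faults[i] > max_faults) {		/* new best: start over */
			max_faults = faults[i];
			best = MASK_NONE;
		}
		if (max_faults && faults[i] == max_faults)
			mask_set(&best, i);		/* tie: accumulate */
	}
	printf("best mask: %#lx\n", best.bits[0]);	/* 0xa: bits 1 and 3 */
	return 0;
}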
@@ -2574,11 +2573,11 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
 	u64 decays = atomic64_read(&cfs_rq->decay_counter);
 
 	decays -= se->avg.decay_count;
+	se->avg.decay_count = 0;
 	if (!decays)
 		return 0;
 
 	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
-	se->avg.decay_count = 0;
 
 	return decays;
 }
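
On the __synchronize_entity_decay() hunk: clearing se->avg.decay_count moves ahead of the early return, so the locally accounted count is reset even when the shared counter has not advanced and the function returns 0. A minimal sketch of that "consume the pending count unconditionally" shape follows; the names are illustrative, not the scheduler's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative analogue of the reordering above: the locally accounted
 * count is cleared even when the shared counter has not advanced, so a
 * caller never observes a stale nonzero count after synchronizing. */
struct entity {
	uint64_t decay_count;	/* periods already accounted locally */
	uint64_t contrib;	/* value that decays over time */
};

static uint64_t sync_decay(struct entity *e, uint64_t global_decays)
{
	uint64_t decays = global_decays - e->decay_count;

	e->decay_count = 0;	/* consume the local count unconditionally */
	if (!decays)
		return 0;	/* counter has not advanced: nothing to apply */

	e->contrib >>= decays;	/* stand-in for decay_load() */
	return decays;
}

int main(void)
{
	struct entity e = { .decay_count = 5, .contrib = 1024 };
	uint64_t decays = sync_decay(&e, 5);	/* shared counter == local count */

	printf("decays=%llu contrib=%llu decay_count=%llu\n",
	       (unsigned long long)decays,
	       (unsigned long long)e.contrib,
	       (unsigned long long)e.decay_count);	/* 0, 1024, 0 */
	return 0;
}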
@@ -4005,6 +4004,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+	/* init_cfs_bandwidth() was not called */
+	if (!cfs_b->throttled_cfs_rq.next)
+		return;
+
 	hrtimer_cancel(&cfs_b->period_timer);
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
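
On the destroy_cfs_bandwidth() hunk: the new check appears to use throttled_cfs_rq.next as an "init ran" marker. INIT_LIST_HEAD() points .next back at the list head, while a merely zeroed structure leaves it NULL, so a NULL .next means init_cfs_bandwidth() never set up the hrtimers and cancelling them must be skipped. Here is a self-contained sketch of that teardown guard, with made-up names standing in for the cfs_bandwidth fields and hrtimer calls.

#include <stddef.h>
#include <stdio.h>

/* Illustrative analogue of guarding teardown behind an "init ran" marker. */
struct list_node { struct list_node *next, *prev; };
struct bandwidth {
	struct list_node throttled;	/* set up only by bw_init() */
	int timer_armed;
};

static void bw_init(struct bandwidth *b)
{
	b->throttled.next = b->throttled.prev = &b->throttled; /* like INIT_LIST_HEAD() */
	b->timer_armed = 1;			/* stand-in for hrtimer setup */
}

static void bw_destroy(struct bandwidth *b)
{
	/* bw_init() was never called: nothing to cancel */
	if (!b->throttled.next)
		return;
	b->timer_armed = 0;			/* stand-in for hrtimer_cancel() */
	printf("timers cancelled\n");
}

int main(void)
{
	struct bandwidth never_inited = { { NULL, NULL }, 0 };	/* zeroed, never used */
	struct bandwidth used = { { NULL, NULL }, 0 };

	bw_destroy(&never_inited);	/* early return, prints nothing */
	bw_init(&used);
	bw_destroy(&used);		/* prints "timers cancelled" */
	return 0;
}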
@@ -4424,7 +4427,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		 * wl = S * s'_i; see (2)
 		 */
 		if (W > 0 && w < W)
-			wl = (w * tg->shares) / W;
+			wl = (w * (long)tg->shares) / W;
 		else
 			wl = tg->shares;
 
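
On the effective_load() hunk: tg->shares is an unsigned quantity, so under C's usual arithmetic conversions a signed w is converted to unsigned before the multiply; if w can be negative, the product wraps to a huge value and the division produces nonsense. Casting shares to long keeps the whole expression signed. A standalone demonstration of the conversion rule (the values are arbitrary):

#include <stdio.h>

int main(void)
{
	long w = -512;			/* a weight delta that has gone negative */
	unsigned long shares = 1024;	/* unsigned, like the shares field */
	long W = 4096;

	/* Mixed signed/unsigned multiply: w is converted to unsigned long,
	 * wraps to a huge value, and the division result is nonsense. */
	long wrong = (w * shares) / W;

	/* Casting the unsigned operand keeps the whole expression signed. */
	long right = (w * (long)shares) / W;

	printf("wrong=%ld right=%ld\n", wrong, right);	/* right is -128 */
	return 0;
}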
@@ -5153,7 +5156,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq->skip_clock_update = 1;
+		rq_clock_skip_update(rq, true);
 	}
 
 	set_skip_buddy(se);
@@ -5945,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
 	 */
 	age_stamp = ACCESS_ONCE(rq->age_stamp);
 	avg = ACCESS_ONCE(rq->rt_avg);
+	delta = __rq_clock_broken(rq) - age_stamp;
 
-	delta = rq_clock(rq) - age_stamp;
 	if (unlikely(delta < 0))
 		delta = 0;
 
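
On the scale_rt_capacity() hunk: the clock is now sampled immediately after age_stamp via __rq_clock_broken(), which by its name reads the raw rq clock without the usual is-it-updated check; the existing delta < 0 clamp still relies on delta being a signed 64-bit value even though both timestamps are unsigned. A small standalone sketch of that clamp pattern (the timestamps are made up):

#include <stdint.h>
#include <stdio.h>

/* Two u64 timestamps may be observed out of order; keeping the difference
 * in a signed 64-bit value lets a negative result be clamped to zero
 * instead of becoming an enormous unsigned delta. */
static uint64_t elapsed(uint64_t now, uint64_t stamp)
{
	/* relies on two's-complement wraparound, as the kernel code does */
	int64_t delta = (int64_t)(now - stamp);

	if (delta < 0)
		delta = 0;
	return (uint64_t)delta;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)elapsed(1000, 400));	/* 600 */
	printf("%llu\n", (unsigned long long)elapsed(400, 1000));	/* 0 */
	return 0;
}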
