Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	29
1 file changed, 10 insertions, 19 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea8707234a..7a33e5986fc5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1680,9 +1680,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	}
 
 	/* ensure we never gain time by being placed backwards. */
-	vruntime = max_vruntime(se->vruntime, vruntime);
-
-	se->vruntime = vruntime;
+	se->vruntime = max_vruntime(se->vruntime, vruntime);
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
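The hunk above folds the clamp and the assignment into a single statement; behavior is unchanged. To see why the clamp matters, here is a minimal userspace sketch: max_vruntime() below mirrors the kernel helper's wrap-safe signed comparison, everything else is an illustrative stand-in. A waking entity whose vruntime is already ahead of the placement target keeps its own vruntime, so placement can never hand it extra runtime:

#include <stdio.h>

typedef unsigned long long u64;

/*
 * Mirrors the kernel's max_vruntime(): vruntime counters can wrap,
 * so compare via the signed difference instead of a plain '>'.
 */
static u64 max_vruntime(u64 max_vr, u64 vr)
{
	long long delta = (long long)(vr - max_vr);

	if (delta > 0)
		max_vr = vr;
	return max_vr;
}

int main(void)
{
	u64 se_vruntime = 1000;	/* entity's vruntime before placement */
	u64 placement   = 900;	/* target computed by place_entity()  */

	/* ensure we never gain time by being placed backwards. */
	se_vruntime = max_vruntime(se_vruntime, placement);
	printf("placed at %llu\n", se_vruntime);	/* prints 1000 */
	return 0;
}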
@@ -2663,7 +2661,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
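The __maybe_unused annotation keeps the build quiet when unthrottle_offline_cfs_rqs() ends up with no callers in a given configuration. A minimal sketch of the mechanism; the #define matches how the kernel's compiler headers expand the macro, and the sample function is hypothetical:

#include <stdio.h>

/*
 * The kernel's compiler headers define __maybe_unused essentially
 * like this; it tells gcc/clang not to emit -Wunused-function (or
 * -Wunused-variable) diagnostics for the annotated symbol.
 */
#define __maybe_unused __attribute__((__unused__))

/* hypothetical helper: defined, but with no callers in this build */
static void __maybe_unused unused_in_this_config(void)
{
	puts("never called");
}

int main(void)
{
	return 0;	/* builds cleanly even with -Wunused-function */
}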
@@ -3254,25 +3252,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_idle_sibling(struct task_struct *p, int target)
 {
-	int cpu = smp_processor_id();
-	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
 	struct sched_group *sg;
-	int i;
+	int i = task_cpu(p);
 
-	/*
-	 * If the task is going to be woken-up on this cpu and if it is
-	 * already idle, then it is the right target.
-	 */
-	if (target == cpu && idle_cpu(cpu))
-		return cpu;
+	if (idle_cpu(target))
+		return target;
 
 	/*
-	 * If the task is going to be woken-up on the cpu where it previously
-	 * ran and if it is currently idle, then it is the right target.
+	 * If the previous cpu is cache affine and idle, don't be stupid.
 	 */
-	if (target == prev_cpu && idle_cpu(prev_cpu))
-		return prev_cpu;
+	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+		return i;
 
 	/*
 	 * Otherwise, iterate the domains and find an eligible idle cpu.
@@ -3286,7 +3277,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
 				goto next;
 
 			for_each_cpu(i, sched_group_cpus(sg)) {
-				if (!idle_cpu(i))
+				if (i == target || !idle_cpu(i))
 					goto next;
 			}
 
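Taken together, the two hunks above change the selection order: try the wakeup target itself first, then the task's previous cpu if it shares a last-level cache with the target and is idle, and only then scan the sched domains, where the loop now also skips the already-rejected target. A userspace sketch of that order, with hypothetical stubs for idle_cpu() and cpus_share_cache(); the real kernel versions consult runqueue state and the domain topology:

#include <stdio.h>
#include <stdbool.h>

static bool cpu_idle[8] = { [2] = true };		/* only cpu 2 is idle    */
static int  llc_id[8]   = { 0, 0, 0, 0, 1, 1, 1, 1 };	/* two last-level caches */

static bool idle_cpu(int cpu)              { return cpu_idle[cpu]; }
static bool cpus_share_cache(int a, int b) { return llc_id[a] == llc_id[b]; }

static int select_idle_sibling(int prev, int target)
{
	/* 1. the wakeup target itself, if idle, is always good enough */
	if (idle_cpu(target))
		return target;

	/* 2. the previous cpu, if cache affine with target and idle */
	if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
		return prev;

	/* 3. the kernel would now scan the sched domains (omitted here) */
	return target;
}

int main(void)
{
	/* prev cpu 2 shares a cache with the busy target cpu 0 and is idle */
	printf("selected cpu %d\n", select_idle_sibling(2, 0));	/* -> 2 */
	return 0;
}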
@@ -6101,7 +6092,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
 	 * idle runqueue:
 	 */
 	if (rq->cfs.load.weight)
-		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
 
 	return rr_interval;
 }
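This last fix is visible with group scheduling: for a task inside a task group, cfs_rq_of(se) is the group's own runqueue rather than the root rq->cfs, and sched_slice() scales the slice by the weight of whatever queue it is handed. A toy model of how the two calls diverge; the structures and the slice formula are simplified stand-ins, not the kernel's actual implementation:

#include <stdio.h>

struct cfs_rq { unsigned long weight; };
struct sched_entity { struct cfs_rq *cfs_rq; unsigned long weight; };

static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* simplified: slice = period * se->weight / queue weight */
static unsigned long long sched_slice(struct cfs_rq *cfs_rq,
				      struct sched_entity *se)
{
	unsigned long long period = 24000000ULL;	/* 24 ms, for illustration */

	return period * se->weight / cfs_rq->weight;
}

int main(void)
{
	struct cfs_rq root  = { .weight = 4096 };	/* rq->cfs             */
	struct cfs_rq group = { .weight = 2048 };	/* the entity's own rq */
	struct sched_entity se = { .cfs_rq = &group, .weight = 1024 };

	printf("wrong (&rq->cfs):    %llu ns\n", sched_slice(&root, &se));
	printf("right (cfs_rq_of()): %llu ns\n", sched_slice(cfs_rq_of(&se), &se));
	return 0;
}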