Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 69 ++++++++++++++++++++++++++-----------
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8e1352c75557..a7e50ba185ac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 						   struct sched_entity,
 						   run_node);
 
-		if (vruntime == cfs_rq->min_vruntime)
+		if (!cfs_rq->curr)
 			vruntime = se->vruntime;
 		else
 			vruntime = min_vruntime(vruntime, se->vruntime);
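Note on the hunk above: the leftmost waiter's vruntime may be taken outright only when there is no current entity; otherwise curr, which is not kept in the rbtree, must participate in the minimum. A minimal userspace sketch of the patched logic, using hypothetical model_* names; the wrap-safe comparisons and the final monotonic clamp follow the kernel's own helpers:

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

static u64 min_vruntime(u64 min, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min);	/* signed: safe across wrap */
	return delta < 0 ? vruntime : min;
}

static u64 max_vruntime(u64 max, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max);
	return delta > 0 ? vruntime : max;
}

struct model_cfs_rq {
	u64 min_vruntime;
	int has_curr, has_leftmost;	/* flags modelling the two pointers */
	u64 curr_vruntime, leftmost_vruntime;
};

static void model_update_min_vruntime(struct model_cfs_rq *rq)
{
	u64 vruntime = rq->min_vruntime;

	if (rq->has_curr)
		vruntime = rq->curr_vruntime;

	if (rq->has_leftmost) {
		if (!rq->has_curr)	/* the patched test */
			vruntime = rq->leftmost_vruntime;
		else
			vruntime = min_vruntime(vruntime, rq->leftmost_vruntime);
	}

	/* min_vruntime is monotonic: it may only move forward. */
	rq->min_vruntime = max_vruntime(rq->min_vruntime, vruntime);
}

int main(void)
{
	struct model_cfs_rq rq = { .min_vruntime = 100, .has_curr = 1,
				   .has_leftmost = 1, .curr_vruntime = 150,
				   .leftmost_vruntime = 120 };
	model_update_min_vruntime(&rq);
	return rq.min_vruntime != 120;	/* expect min(150, 120), clamped up */
}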
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
-		struct load_weight *load = &cfs_rq->load;
+		struct load_weight *load;
+
+		cfs_rq = cfs_rq_of(se);
+		load = &cfs_rq->load;
 
 		if (unlikely(!se->on_rq)) {
 			struct load_weight lw = cfs_rq->load;
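The fix above matters with group scheduling: sched_slice() walks up the entity hierarchy, and each level must scale the period by the weight ratio on its own cfs_rq, not by the load of the bottom-level queue the function was called with. A hedged sketch of that per-level folding, with hypothetical struct names and example weights:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

struct level {
	unsigned long se_weight;	/* weight of the entity at this level */
	unsigned long rq_weight;	/* total weight of that level's cfs_rq */
};

/* slice = period * (w_se / w_rq) folded over every hierarchy level. */
static u64 model_sched_slice(u64 period, const struct level *lv, int depth)
{
	u64 slice = period;

	for (int i = 0; i < depth; i++)
		slice = slice * lv[i].se_weight / lv[i].rq_weight;
	return slice;
}

int main(void)
{
	/* A nice-0 task holding half its group, inside a group holding
	 * half the top-level runqueue: 20ms * 1/2 * 1/2 = 5ms. */
	struct level lv[] = { { 1024, 2048 }, { 512, 1024 } };

	printf("slice = %llu ns\n",
	       (unsigned long long)model_sched_slice(20000000ull, lv, 2));
	return 0;
}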
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		unsigned long thresh = sysctl_sched_latency;
 
 		/*
-		 * convert the sleeper threshold into virtual time
+		 * Convert the sleeper threshold into virtual time.
+		 * SCHED_IDLE is a special sub-class. We care about
+		 * fairness only relative to other SCHED_IDLE tasks,
+		 * all of which have the same weight.
 		 */
-		if (sched_feat(NORMALIZED_SLEEPER))
+		if (sched_feat(NORMALIZED_SLEEPER) &&
+				task_of(se)->policy != SCHED_IDLE)
 			thresh = calc_delta_fair(thresh, se);
 
 		vruntime -= thresh;
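The SCHED_IDLE exclusion above exists because calc_delta_fair() scales the sleeper threshold by NICE_0_LOAD/weight, so a minimal-weight entity would receive an enormous vruntime credit on wakeup. A rough userspace illustration; NICE_0_LOAD = 1024 is the real constant, while the idle weight of 2 is illustrative only:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
#define NICE_0_LOAD 1024UL

/* Models the weight scaling performed by calc_delta_fair(). */
static u64 calc_delta_fair_model(u64 delta, unsigned long weight)
{
	return delta * NICE_0_LOAD / weight;
}

int main(void)
{
	u64 thresh = 20000000ull;	/* sysctl_sched_latency, ~20ms */

	printf("nice-0 credit:     %llu ns\n",
	       (unsigned long long)calc_delta_fair_model(thresh, 1024));
	/* ~10.24 seconds of credit: why SCHED_IDLE must be excluded. */
	printf("SCHED_IDLE credit: %llu ns\n",
	       (unsigned long long)calc_delta_fair_model(thresh, 2));
	return 0;
}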
@@ -712,7 +719,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 		__enqueue_entity(cfs_rq, se);
 }
 
-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->last == se)
 		cfs_rq->last = NULL;
@@ -721,6 +728,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		cfs_rq->next = NULL;
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		__clear_buddies(cfs_rq_of(se), se);
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
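With group scheduling, a task is represented by one sched_entity per hierarchy level, each enqueued on its own cfs_rq, so a buddy hint set on the way up must also be cleared at every level; that walk is what the new clear_buddies() wrapper adds. A self-contained sketch with hypothetical model_* types standing in for sched_entity/cfs_rq:

#include <stddef.h>

struct model_rq;

struct model_entity {
	struct model_entity *parent;	/* group entity one level up */
	struct model_rq *my_rq;		/* runqueue this entity lives on */
};

struct model_rq {
	struct model_entity *last, *next;	/* buddy hints */
};

/* Mirrors __clear_buddies(): drop the hints on a single level. */
static void model_clear_one(struct model_rq *rq, struct model_entity *se)
{
	if (rq->last == se)
		rq->last = NULL;
	if (rq->next == se)
		rq->next = NULL;
}

/* Mirrors: for_each_sched_entity(se) __clear_buddies(cfs_rq_of(se), se); */
static void model_clear_buddies(struct model_entity *se)
{
	for (; se; se = se->parent)
		model_clear_one(se->my_rq, se);
}

int main(void)
{
	struct model_rq top = {0}, grp = {0};
	struct model_entity g = { NULL, &top };	/* group entity on top rq */
	struct model_entity t = { &g, &grp };	/* task entity inside group */

	grp.next = &t;
	top.last = &g;
	model_clear_buddies(&t);	/* clears t on grp and g on top */
	return !(grp.next == NULL && top.last == NULL);
}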
@@ -761,8 +774,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
+	if (delta_exec > ideal_runtime) {
 		resched_task(rq_of(cfs_rq)->curr);
+		/*
+		 * The current task ran long enough, ensure it doesn't get
+		 * re-elected due to buddy favours.
+		 */
+		clear_buddies(cfs_rq, curr);
+	}
 }
 
 static void
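The test being extended above compares the runtime accumulated since the entity was last picked against its ideal slice; on overrun the task is rescheduled and, with this change, also stripped of any buddy favour so pick_next_entity() cannot immediately re-elect it. A hypothetical model of just the slice-expiry check:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

/*
 * Runtime consumed in the current slice is the total runtime minus the
 * snapshot taken when the entity was picked (prev_sum_exec_runtime).
 */
static bool model_slice_expired(u64 sum_exec_runtime,
				u64 prev_sum_exec_runtime, u64 ideal_runtime)
{
	return sum_exec_runtime - prev_sum_exec_runtime > ideal_runtime;
}

int main(void)
{
	/* 6ms consumed against a 5ms ideal slice: time to preempt. */
	return !model_slice_expired(26000000ull, 20000000ull, 5000000ull);
}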
@@ -1172,20 +1191,15 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	    int idx, unsigned long load, unsigned long this_load,
 	    unsigned int imbalance)
 {
-	struct task_struct *curr = this_rq->curr;
-	struct task_group *tg;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
-	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-			p->se.avg_overlap > sysctl_sched_migration_cost))
-		sync = 0;
-
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
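For context on the comment that survives above: on a sync wakeup the waker is expected to sleep immediately, so its weight is discounted from this CPU's load before the affine-wakeup balance test. A small sketch of that adjustment; the names are hypothetical, the real code uses the waker's se.load.weight, and the underflow guard is this sketch's own addition:

/* Discount the (maximum possible) contribution of the current task. */
static unsigned long model_sync_load(unsigned long this_load,
				     unsigned long curr_weight, int sync)
{
	if (sync && this_load >= curr_weight)
		return this_load - curr_weight;
	return this_load;
}

int main(void)
{
	return model_sync_load(2048, 1024, 1) != 1024;
}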
@@ -1340,14 +1354,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->last = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->last = se;
+	}
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->next = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->next = se;
+	}
 }
 
 /*
@@ -1393,18 +1411,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 
 	/*
-	 * Batch tasks do not preempt (their preemption is driven by
+	 * Batch and idle tasks do not preempt (their preemption is driven by
 	 * the tick):
 	 */
-	if (unlikely(p->policy == SCHED_BATCH))
+	if (unlikely(p->policy != SCHED_NORMAL))
 		return;
 
+	/* Idle tasks are by definition preempted by everybody. */
+	if (unlikely(curr->policy == SCHED_IDLE)) {
+		resched_task(curr);
+		return;
+	}
+
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
-			(se->avg_overlap < sysctl_sched_migration_cost &&
-			 pse->avg_overlap < sysctl_sched_migration_cost))) {
+	if (sched_feat(WAKEUP_OVERLAP) && sync) {
 		resched_task(curr);
 		return;
 	}
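The hunk above turns the policy handling into two ordered gates: a waker that is not SCHED_NORMAL never preempts on its own, and only after that gate does a SCHED_IDLE current task lose to the (normal) waker. A hypothetical model of just those gates, with stand-in constants; the vruntime-based checks that follow in the real function are out of scope:

#include <stdbool.h>

/* Stand-ins for SCHED_NORMAL / SCHED_BATCH / SCHED_IDLE. */
enum model_policy { MODEL_NORMAL, MODEL_BATCH, MODEL_IDLE };

/*
 * Returns true when the gates alone decide on preemption; false covers
 * both "waker gated off" and "defer to the later wakeup checks".
 */
static bool model_policy_preempts(enum model_policy p, enum model_policy curr)
{
	if (p != MODEL_NORMAL)	/* batch and idle wakers never preempt */
		return false;
	if (curr == MODEL_IDLE)	/* an idle-policy curr loses to everybody */
		return true;
	return false;		/* defer to the remaining checks */
}

int main(void)
{
	/* A normal waker preempts an idle curr; a batch waker does not. */
	return !(model_policy_preempts(MODEL_NORMAL, MODEL_IDLE) &&
		 !model_policy_preempts(MODEL_BATCH, MODEL_IDLE));
}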
@@ -1435,6 +1457,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
+		/*
+		 * If se was a buddy, clear it so that it will have to earn
+		 * the favour again.
+		 */
+		__clear_buddies(cfs_rq, se);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);