Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 32
1 file changed, 21 insertions, 11 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5cc1c162044f..a7e50ba185ac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -719,7 +719,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 		__enqueue_entity(cfs_rq, se);
 }
 
-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->last == se)
 		cfs_rq->last = NULL;
@@ -728,6 +728,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		cfs_rq->next = NULL;
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		__clear_buddies(cfs_rq_of(se), se);
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -768,8 +774,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
+	if (delta_exec > ideal_runtime) {
 		resched_task(rq_of(cfs_rq)->curr);
+		/*
+		 * The current task ran long enough, ensure it doesn't get
+		 * re-elected due to buddy favours.
+		 */
+		clear_buddies(cfs_rq, curr);
+	}
 }
 
 static void
@@ -1179,20 +1191,15 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	    int idx, unsigned long load, unsigned long this_load,
 	    unsigned int imbalance)
 {
-	struct task_struct *curr = this_rq->curr;
-	struct task_group *tg;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
-	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-			p->se.avg_overlap > sysctl_sched_migration_cost))
-		sync = 0;
-
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1419,9 +1426,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
-			(se->avg_overlap < sysctl_sched_migration_cost &&
-			 pse->avg_overlap < sysctl_sched_migration_cost))) {
+	if (sched_feat(WAKEUP_OVERLAP) && sync) {
 		resched_task(curr);
 		return;
 	}
@@ -1452,6 +1457,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
+		/*
+		 * If se was a buddy, clear it so that it will have to earn
+		 * the favour again.
+		 */
+		__clear_buddies(cfs_rq, se);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
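
To see the buddy bookkeeping in isolation, the following is a minimal, self-contained sketch of the split this patch introduces: __clear_buddies() drops a single runqueue's last/next hints when they point at the given entity, and clear_buddies() repeats that at every level of the group hierarchy via for_each_sched_entity(). The struct layouts, the for_each_sched_entity() macro, and main() below are simplified toy stand-ins for illustration, not the kernel's definitions.

/*
 * Toy illustration of the __clear_buddies()/clear_buddies() split.
 * Data structures are simplified stand-ins, not kernel code.
 */
#include <stdio.h>

struct sched_entity;

struct cfs_rq {
	struct sched_entity *next;	/* "wake up next" buddy hint */
	struct sched_entity *last;	/* "ran last" buddy hint */
};

struct sched_entity {
	const char *name;
	struct cfs_rq *cfs_rq;		/* runqueue this entity is queued on */
	struct sched_entity *parent;	/* enclosing group entity, NULL at the top */
};

static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* Walk from an entity up through its enclosing group entities. */
#define for_each_sched_entity(se) \
	for (; se; se = se->parent)

/* Clear one cfs_rq's buddy hints if they point at se. */
static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;
	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

/* Clear the hints at every level of the group hierarchy. */
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	for_each_sched_entity(se)
		__clear_buddies(cfs_rq_of(se), se);
}

int main(void)
{
	struct cfs_rq top = { 0 }, group = { 0 };
	struct sched_entity group_se = { "group", &top, NULL };
	struct sched_entity task_se = { "task", &group, &group_se };

	/* Pretend both levels currently favour this task as "next". */
	group.next = &task_se;
	top.next = &group_se;

	clear_buddies(&group, &task_se);

	/* Both hints are NULL after the walk. */
	printf("group.next=%p top.next=%p\n",
	       (void *)group.next, (void *)top.next);
	return 0;
}

In this sketch (as in the group-scheduling configuration the kernel helper assumes), the walk stops at the root because the top-level entity has no parent. The point of the patch is visible in the call sites above: once check_preempt_tick() decides the current task has used up its slice, or pick_next_task_fair() has actually picked a buddy, no cfs_rq at any level keeps favouring that entity through a stale hint.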
