Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 69
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bdf64346b4d1..bc1563e7a248 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
                                                    struct sched_entity,
                                                    run_node);
 
-                if (vruntime == cfs_rq->min_vruntime)
+                if (!cfs_rq->curr)
                         vruntime = se->vruntime;
                 else
                         vruntime = min_vruntime(vruntime, se->vruntime);
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
         u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
         for_each_sched_entity(se) {
-                struct load_weight *load = &cfs_rq->load;
+                struct load_weight *load;
+
+                cfs_rq = cfs_rq_of(se);
+                load = &cfs_rq->load;
 
                 if (unlikely(!se->on_rq)) {
                         struct load_weight lw = cfs_rq->load;
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                 unsigned long thresh = sysctl_sched_latency;
 
                 /*
-                 * convert the sleeper threshold into virtual time
+                 * Convert the sleeper threshold into virtual time.
+                 * SCHED_IDLE is a special sub-class. We care about
+                 * fairness only relative to other SCHED_IDLE tasks,
+                 * all of which have the same weight.
                  */
-                if (sched_feat(NORMALIZED_SLEEPER))
+                if (sched_feat(NORMALIZED_SLEEPER) &&
+                                task_of(se)->policy != SCHED_IDLE)
                         thresh = calc_delta_fair(thresh, se);
 
                 vruntime -= thresh;
@@ -712,7 +719,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
                 __enqueue_entity(cfs_rq, se);
 }
 
-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
         if (cfs_rq->last == se)
                 cfs_rq->last = NULL;
@@ -721,6 +728,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 cfs_rq->next = NULL;
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+        for_each_sched_entity(se)
+                __clear_buddies(cfs_rq_of(se), se);
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -761,8 +774,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
         ideal_runtime = sched_slice(cfs_rq, curr);
         delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-        if (delta_exec > ideal_runtime)
+        if (delta_exec > ideal_runtime) {
                 resched_task(rq_of(cfs_rq)->curr);
+                /*
+                 * The current task ran long enough, ensure it doesn't get
+                 * re-elected due to buddy favours.
+                 */
+                clear_buddies(cfs_rq, curr);
+        }
 }
 
 static void
@@ -1172,20 +1191,15 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
             int idx, unsigned long load, unsigned long this_load,
             unsigned int imbalance)
 {
-        struct task_struct *curr = this_rq->curr;
-        struct task_group *tg;
         unsigned long tl = this_load;
         unsigned long tl_per_task;
+        struct task_group *tg;
         unsigned long weight;
         int balanced;
 
         if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
                 return 0;
 
-        if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-                        p->se.avg_overlap > sysctl_sched_migration_cost))
-                sync = 0;
-
         /*
          * If sync wakeup then subtract the (maximum possible)
          * effect of the currently running task from the load
@@ -1387,14 +1401,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-        for_each_sched_entity(se)
-                cfs_rq_of(se)->last = se;
+        if (likely(task_of(se)->policy != SCHED_IDLE)) {
+                for_each_sched_entity(se)
+                        cfs_rq_of(se)->last = se;
+        }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-        for_each_sched_entity(se)
-                cfs_rq_of(se)->next = se;
+        if (likely(task_of(se)->policy != SCHED_IDLE)) {
+                for_each_sched_entity(se)
+                        cfs_rq_of(se)->next = se;
+        }
 }
 
 /*
@@ -1440,18 +1458,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
                 return;
 
         /*
-         * Batch tasks do not preempt (their preemption is driven by
+         * Batch and idle tasks do not preempt (their preemption is driven by
          * the tick):
          */
-        if (unlikely(p->policy == SCHED_BATCH))
+        if (unlikely(p->policy != SCHED_NORMAL))
                 return;
 
+        /* Idle tasks are by definition preempted by everybody. */
+        if (unlikely(curr->policy == SCHED_IDLE)) {
+                resched_task(curr);
+                return;
+        }
+
         if (!sched_feat(WAKEUP_PREEMPT))
                 return;
 
-        if (sched_feat(WAKEUP_OVERLAP) && (sync ||
-                        (se->avg_overlap < sysctl_sched_migration_cost &&
-                         pse->avg_overlap < sysctl_sched_migration_cost))) {
+        if (sched_feat(WAKEUP_OVERLAP) && sync) {
                 resched_task(curr);
                 return;
         }
@@ -1482,6 +1504,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
         do {
                 se = pick_next_entity(cfs_rq);
+                /*
+                 * If se was a buddy, clear it so that it will have to earn
+                 * the favour again.
+                 */
+                __clear_buddies(cfs_rq, se);
                 set_next_entity(cfs_rq, se);
                 cfs_rq = group_cfs_rq(se);
         } while (cfs_rq);