Diffstat (limited to 'kernel')

 kernel/sched.c          |  6 ++++--
 kernel/sched_fair.c     | 32 +++++++++++++++++++++++++-------
 kernel/sched_features.h |  1 +
 3 files changed, 30 insertions(+), 9 deletions(-)
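Summary: this patch adds a backward-looking buddy to CFS. Alongside the existing forward buddy (cfs_rq->next, the freshly woken task), struct cfs_rq gains a *last pointer that remembers the task a wakeup just preempted. pick_next_entity() then prefers the next buddy, falls back to the last buddy, and only then takes the leftmost entity in the rbtree, so a pair of tasks that keep waking each other can bounce back and forth while their caches are still warm. The whole mechanism is guarded by the new LAST_BUDDY feature bit.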
diff --git a/kernel/sched.c b/kernel/sched.c
index e8819bc6f462..82cc839c9210 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,7 +397,7 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
 	unsigned long nr_spread_over;
 
@@ -1805,7 +1805,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ebd6de8d17fd..a6b1db8a0bd8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -341,9 +341,6 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		cfs_rq->rb_leftmost = next_node;
 	}
 
-	if (cfs_rq->next == se)
-		cfs_rq->next = NULL;
-
 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
@@ -741,6 +738,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 #endif
 	}
 
+	if (cfs_rq->last == se)
+		cfs_rq->last = NULL;
+
+	if (cfs_rq->next == se)
+		cfs_rq->next = NULL;
+
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
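Both buddy pointers must be invalidated when their entity leaves the runqueue, otherwise ->next or ->last could dangle and pick_next_entity() would hand the CPU to a task that is no longer queued. Note why the invalidation moves from __dequeue_entity() to dequeue_entity(): ->last usually points at the currently running entity, which sits outside the rbtree while it runs, so the rbtree helper would never see it; dequeue_entity() is where every entity passes when it actually leaves the cfs_rq. A clear_buddies()-style helper folding the two checks together would look like this (hypothetical here, reusing the stub types from the sketch above; the patch open-codes the checks):

	/* Hypothetical helper; the patch open-codes these two checks
	 * in dequeue_entity().
	 */
	static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
	{
		if (cfs_rq->last == se)
			cfs_rq->last = NULL;
		if (cfs_rq->next == se)
			cfs_rq->next = NULL;
	}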
@@ -798,10 +801,13 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *se = __pick_next_entity(cfs_rq);
 
-	if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
-		return se;
+	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
+		return cfs_rq->next;
 
-	return cfs_rq->next;
+	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
+		return cfs_rq->last;
+
+	return se;
 }
 
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
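wakeup_preempt_entity(buddy, se) returns 1 only when the leftmost entity se is more than one wakeup granularity to the left of the buddy (smaller vruntime), so the new `< 1` tests read as "running this buddy costs at most one granularity of fairness". A toy model of the resulting pick order, with plain integers and a constant granularity (the kernel scales the granularity by task weight, and the names here are this sketch's own):

	#include <stdio.h>

	typedef long long s64;

	struct entity {
		const char *name;
		s64 vruntime;	/* virtual runtime; smaller == more entitled */
	};

	/* Toy model of wakeup_preempt_entity(curr, se): returns 1 when
	 * se's vruntime is more than 'gran' below curr's (se must run),
	 * 0 when the gap is within the granularity, -1 when se is behind.
	 */
	static int wakeup_preempt(const struct entity *curr,
				  const struct entity *se, s64 gran)
	{
		s64 vdiff = curr->vruntime - se->vruntime;

		if (vdiff <= 0)
			return -1;
		if (vdiff > gran)
			return 1;
		return 0;
	}

	/* The new pick order: forward buddy, then backward buddy, then
	 * the leftmost entity of the rbtree.
	 */
	static const struct entity *
	pick(const struct entity *leftmost, const struct entity *next,
	     const struct entity *last, s64 gran)
	{
		if (next && wakeup_preempt(next, leftmost, gran) < 1)
			return next;
		if (last && wakeup_preempt(last, leftmost, gran) < 1)
			return last;
		return leftmost;
	}

	int main(void)
	{
		struct entity left = { "leftmost", 100 };
		struct entity next = { "next",     104 };  /* within gran */
		struct entity last = { "last",     125 };  /* too far ahead */

		printf("%s\n", pick(&left, &next, &last, 10)->name); /* next */
		printf("%s\n", pick(&left, NULL,  &last, 10)->name); /* leftmost */
		return 0;
	}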
@@ -1319,10 +1325,11 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 {
 	struct task_struct *curr = rq->curr;
-	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 
 	if (unlikely(rt_prio(p->prio))) {
+		struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+
 		update_rq_clock(rq);
 		update_curr(cfs_rq);
 		resched_task(curr);
@@ -1335,6 +1342,17 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (unlikely(se == pse))
 		return;
 
+	/*
+	 * Only set the backward buddy when the current task is still on the
+	 * rq. This can happen when a wakeup gets interleaved with schedule on
+	 * the ->pre_schedule() or idle_balance() point, either of which can
+	 * drop the rq lock.
+	 *
+	 * Also, during early boot the idle thread is in the fair class, for
+	 * obvious reasons its a bad idea to schedule back to the idle thread.
+	 */
+	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
+		cfs_rq_of(se)->last = se;
+
 	cfs_rq_of(pse)->next = pse;
 
 	/*
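Taken together with pick_next_entity(): when task B wakes up and preempts task A, check_preempt_wakeup() records B as ->next and A as ->last, so once B sleeps again the scheduler can hand the CPU straight back to the still-cache-hot A instead of whatever happens to be leftmost, provided that stays within one wakeup granularity of fairness. The two guards on the new block follow the comment above them: current must still be on the runqueue (the rq lock can be dropped around ->pre_schedule() or idle_balance(), letting a wakeup interleave with schedule()), and the early-boot idle thread, which still sits in the fair class at that point, must never become a buddy.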
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index fda016218296..da5d93b5d2c6 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -12,3 +12,4 @@ SCHED_FEAT(LB_BIAS, 1)
 SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
 SCHED_FEAT(WAKEUP_OVERLAP, 0)
+SCHED_FEAT(LAST_BUDDY, 1)
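LAST_BUDDY defaults to on. Like the other SCHED_FEAT() bits of this era, it should be switchable at runtime through debugfs, e.g. `echo NO_LAST_BUDDY > /sys/kernel/debug/sched_features`, which makes A/B-testing the buddy logic easy; treat the exact path as an assumption about the running kernel (debugfs mounted, CONFIG_SCHED_DEBUG enabled).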