diff options
Diffstat (limited to 'kernel/sched_fair.c')
| -rw-r--r-- | kernel/sched_fair.c | 74 |
1 file changed, 47 insertions(+), 27 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 4e777b47eeda..37087a7fac22 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |||
| 822 | * re-elected due to buddy favours. | 822 | * re-elected due to buddy favours. |
| 823 | */ | 823 | */ |
| 824 | clear_buddies(cfs_rq, curr); | 824 | clear_buddies(cfs_rq, curr); |
| 825 | return; | ||
| 826 | } | ||
| 827 | |||
| 828 | /* | ||
| 829 | * Ensure that a task that missed wakeup preemption by a | ||
| 830 | * narrow margin doesn't have to wait for a full slice. | ||
| 831 | * This also mitigates buddy induced latencies under load. | ||
| 832 | */ | ||
| 833 | if (!sched_feat(WAKEUP_PREEMPT)) | ||
| 834 | return; | ||
| 835 | |||
| 836 | if (delta_exec < sysctl_sched_min_granularity) | ||
| 837 | return; | ||
| 838 | |||
| 839 | if (cfs_rq->nr_running > 1) { | ||
| 840 | struct sched_entity *se = __pick_next_entity(cfs_rq); | ||
| 841 | s64 delta = curr->vruntime - se->vruntime; | ||
| 842 | |||
| 843 | if (delta > ideal_runtime) | ||
| 844 | resched_task(rq_of(cfs_rq)->curr); | ||
| 825 | } | 845 | } |
| 826 | } | 846 | } |
| 827 | 847 | ||
| @@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); | |||
| 861 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) | 881 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) |
| 862 | { | 882 | { |
| 863 | struct sched_entity *se = __pick_next_entity(cfs_rq); | 883 | struct sched_entity *se = __pick_next_entity(cfs_rq); |
| 884 | struct sched_entity *left = se; | ||
| 864 | 885 | ||
| 865 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1) | 886 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) |
| 866 | return cfs_rq->next; | 887 | se = cfs_rq->next; |
| 867 | 888 | ||
| 868 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1) | 889 | /* |
| 869 | return cfs_rq->last; | 890 | * Prefer last buddy, try to return the CPU to a preempted task. |
| 891 | */ | ||
| 892 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) | ||
| 893 | se = cfs_rq->last; | ||
| 894 | |||
| 895 | clear_buddies(cfs_rq, se); | ||
| 870 | 896 | ||
| 871 | return se; | 897 | return se; |
| 872 | } | 898 | } |
| @@ -1568,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
| 1568 | struct sched_entity *se = &curr->se, *pse = &p->se; | 1594 | struct sched_entity *se = &curr->se, *pse = &p->se; |
| 1569 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 1595 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
| 1570 | int sync = wake_flags & WF_SYNC; | 1596 | int sync = wake_flags & WF_SYNC; |
| 1597 | int scale = cfs_rq->nr_running >= sched_nr_latency; | ||
| 1571 | 1598 | ||
| 1572 | update_curr(cfs_rq); | 1599 | update_curr(cfs_rq); |
| 1573 | 1600 | ||
| @@ -1582,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
| 1582 | if (unlikely(se == pse)) | 1609 | if (unlikely(se == pse)) |
| 1583 | return; | 1610 | return; |
| 1584 | 1611 | ||
| 1585 | /* | 1612 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) |
| 1586 | * Only set the backward buddy when the current task is still on the | ||
| 1587 | * rq. This can happen when a wakeup gets interleaved with schedule on | ||
| 1588 | * the ->pre_schedule() or idle_balance() point, either of which can | ||
| 1589 | * drop the rq lock. | ||
| 1590 | * | ||
| 1591 | * Also, during early boot the idle thread is in the fair class, for | ||
| 1592 | * obvious reasons its a bad idea to schedule back to the idle thread. | ||
| 1593 | */ | ||
| 1594 | if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle)) | ||
| 1595 | set_last_buddy(se); | ||
| 1596 | if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) | ||
| 1597 | set_next_buddy(pse); | 1613 | set_next_buddy(pse); |
| 1598 | 1614 | ||
| 1599 | /* | 1615 | /* |
| @@ -1639,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
| 1639 | 1655 | ||
| 1640 | BUG_ON(!pse); | 1656 | BUG_ON(!pse); |
| 1641 | 1657 | ||
| 1642 | if (wakeup_preempt_entity(se, pse) == 1) | 1658 | if (wakeup_preempt_entity(se, pse) == 1) { |
| 1643 | resched_task(curr); | 1659 | resched_task(curr); |
| 1660 | /* | ||
| 1661 | * Only set the backward buddy when the current task is still | ||
| 1662 | * on the rq. This can happen when a wakeup gets interleaved | ||
| 1663 | * with schedule on the ->pre_schedule() or idle_balance() | ||
| 1664 | point, either of which can drop the rq lock. | ||
| 1665 | * | ||
| 1666 | * Also, during early boot the idle thread is in the fair class, | ||
| 1667 | * for obvious reasons it's a bad idea to schedule back to it. | ||
| 1668 | */ | ||
| 1669 | if (unlikely(!se->on_rq || curr == rq->idle)) | ||
| 1670 | return; | ||
| 1671 | if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) | ||
| 1672 | set_last_buddy(se); | ||
| 1673 | } | ||
| 1644 | } | 1674 | } |
| 1645 | 1675 | ||
| 1646 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 1676 | static struct task_struct *pick_next_task_fair(struct rq *rq) |
| @@ -1654,16 +1684,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) | |||
| 1654 | 1684 | ||
| 1655 | do { | 1685 | do { |
| 1656 | se = pick_next_entity(cfs_rq); | 1686 | se = pick_next_entity(cfs_rq); |
| 1657 | /* | ||
| 1658 | * If se was a buddy, clear it so that it will have to earn | ||
| 1659 | * the favour again. | ||
| 1660 | * | ||
| 1661 | * If se was not a buddy, clear the buddies because neither | ||
| 1662 | * was elegible to run, let them earn it again. | ||
| 1663 | * | ||
| 1664 | * IOW. unconditionally clear buddies. | ||
| 1665 | */ | ||
| 1666 | __clear_buddies(cfs_rq, NULL); | ||
| 1667 | set_next_entity(cfs_rq, se); | 1687 | set_next_entity(cfs_rq, se); |
| 1668 | cfs_rq = group_cfs_rq(se); | 1688 | cfs_rq = group_cfs_rq(se); |
| 1669 | } while (cfs_rq); | 1689 | } while (cfs_rq); |
