author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-11-28 12:51:02 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-09 04:03:07 -0500
commit		3a7e73a2e26fffdbc46ba95fc0425418984f5140 (patch)
tree		02fdd1c081acfde7522a971bc2969bca76ca90c7 /kernel/sched_fair.c
parent		a65ac745e47e91f9d98dbf07f22ed0492e34d998 (diff)
sched: Clean up check_preempt_wakeup()
Streamline the wakeup preemption code a bit, unifying the preempt paths
so that they all do the same thing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	73
1 file changed, 33 insertions(+), 40 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4dec18579c9a..76b5792c4198 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1651,10 +1651,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	int sync = wake_flags & WF_SYNC;
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	if (unlikely(rt_prio(p->prio))) {
-		resched_task(curr);
-		return;
-	}
+	if (unlikely(rt_prio(p->prio)))
+		goto preempt;
 
 	if (unlikely(p->sched_class != &fair_sched_class))
 		return;
@@ -1680,52 +1678,47 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 		return;
 
 	/* Idle tasks are by definition preempted by everybody. */
-	if (unlikely(curr->policy == SCHED_IDLE)) {
-		resched_task(curr);
-		return;
-	}
+	if (unlikely(curr->policy == SCHED_IDLE))
+		goto preempt;
 
-	if ((sched_feat(WAKEUP_SYNC) && sync) ||
-	    (sched_feat(WAKEUP_OVERLAP) &&
-	     (se->avg_overlap < sysctl_sched_migration_cost &&
-	      pse->avg_overlap < sysctl_sched_migration_cost))) {
-		resched_task(curr);
-		return;
-	}
+	if (sched_feat(WAKEUP_SYNC) && sync)
+		goto preempt;
 
-	if (sched_feat(WAKEUP_RUNNING)) {
-		if (pse->avg_running < se->avg_running) {
-			set_next_buddy(pse);
-			resched_task(curr);
-			return;
-		}
-	}
+	if (sched_feat(WAKEUP_OVERLAP) &&
+			se->avg_overlap < sysctl_sched_migration_cost &&
+			pse->avg_overlap < sysctl_sched_migration_cost)
+		goto preempt;
+
+	if (sched_feat(WAKEUP_RUNNING) && pse->avg_running < se->avg_running)
+		goto preempt;
 
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
+	update_curr(cfs_rq);
 	find_matching_se(&se, &pse);
-
 	BUG_ON(!pse);
+	if (wakeup_preempt_entity(se, pse) == 1)
+		goto preempt;
 
-	update_curr(cfs_rq);
+	return;
 
-	if (wakeup_preempt_entity(se, pse) == 1) {
-		resched_task(curr);
-		/*
-		 * Only set the backward buddy when the current task is still
-		 * on the rq. This can happen when a wakeup gets interleaved
-		 * with schedule on the ->pre_schedule() or idle_balance()
-		 * point, either of which can * drop the rq lock.
-		 *
-		 * Also, during early boot the idle thread is in the fair class,
-		 * for obvious reasons its a bad idea to schedule back to it.
-		 */
-		if (unlikely(!se->on_rq || curr == rq->idle))
-			return;
-		if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
-			set_last_buddy(se);
-	}
+preempt:
+	resched_task(curr);
+	/*
+	 * Only set the backward buddy when the current task is still
+	 * on the rq. This can happen when a wakeup gets interleaved
+	 * with schedule on the ->pre_schedule() or idle_balance()
+	 * point, either of which can * drop the rq lock.
+	 *
+	 * Also, during early boot the idle thread is in the fair class,
+	 * for obvious reasons its a bad idea to schedule back to it.
+	 */
+	if (unlikely(!se->on_rq || curr == rq->idle))
+		return;
+
+	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+		set_last_buddy(se);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
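
For readers who want the shape of the change without walking the diff: the clean-up replaces several duplicated "resched_task(curr); return;" blocks with early "goto preempt" jumps into a single shared tail. The following minimal, self-contained C sketch illustrates only that control-flow pattern; the task struct, the resched() stub and the condition helpers are hypothetical stand-ins, not the scheduler's actual API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for scheduler state and helpers. */
struct task {
	int prio;          /* lower value = higher priority */
	bool idle_policy;  /* a SCHED_IDLE-like task */
};

/* Single place where the preemption decision is acted upon. */
static void resched(struct task *curr)
{
	printf("reschedule current task (prio %d)\n", curr->prio);
}

static bool is_rt(const struct task *p)
{
	return p->prio < 0;
}

/*
 * Same shape as the refactored check_preempt_wakeup(): every condition
 * that previously duplicated "resched(); return;" now jumps to one
 * shared "preempt" label, so the common tail is written exactly once.
 */
static void check_preempt(struct task *curr, struct task *p)
{
	if (is_rt(p))
		goto preempt;           /* RT wakeups always preempt */

	if (curr->idle_policy)
		goto preempt;           /* idle tasks yield to everybody */

	if (p->prio < curr->prio)
		goto preempt;           /* waking task is more important */

	return;                         /* no preemption */

preempt:
	resched(curr);
}

int main(void)
{
	struct task curr  = { .prio = 10, .idle_policy = false };
	struct task woken = { .prio = 5,  .idle_policy = false };

	check_preempt(&curr, &woken);   /* prints the reschedule message */
	return 0;
}

The benefit is the same as in the real function: the resched/buddy bookkeeping at the end exists once, so adding or reordering preemption conditions cannot leave one exit path out of sync with the others.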