path: root/kernel/sched_fair.c
author		Ingo Molnar <mingo@elte.hu>	2009-11-26 04:50:39 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-11-26 04:50:42 -0500
commit		16bc67edeb49b531940b2ba6c183780a1b5c472d (patch)
tree		71b4bc48e47e54f2c0b3126d8f81d2f31b707ea8 /kernel/sched_fair.c
parent		f6630114d9198aa959ac95c131334c020038f253 (diff)
parent		047106adcc85e3023da210143a6ab8a55df9e0fc (diff)
Merge branch 'sched/urgent' into sched/core
Merge reason: Pick up fixes that did not make it into .32.0

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	74
1 file changed, 47 insertions(+), 27 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 24086e7e7593..f61837ad336d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		 * re-elected due to buddy favours.
 		 */
 		clear_buddies(cfs_rq, curr);
+		return;
+	}
+
+	/*
+	 * Ensure that a task that missed wakeup preemption by a
+	 * narrow margin doesn't have to wait for a full slice.
+	 * This also mitigates buddy induced latencies under load.
+	 */
+	if (!sched_feat(WAKEUP_PREEMPT))
+		return;
+
+	if (delta_exec < sysctl_sched_min_granularity)
+		return;
+
+	if (cfs_rq->nr_running > 1) {
+		struct sched_entity *se = __pick_next_entity(cfs_rq);
+		s64 delta = curr->vruntime - se->vruntime;
+
+		if (delta > ideal_runtime)
+			resched_task(rq_of(cfs_rq)->curr);
 	}
 }
 
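Taken together, this hunk splits the tick-time preemption test in two: the existing full-slice check, plus a new catch-up check so that a task which narrowly missed wakeup preemption does not have to sit out a full slice behind curr. For reference, the resulting function reads roughly as follows; the prologue computing ideal_runtime and delta_exec lies outside the hunk and is reconstructed from the kernel source of this era, so treat the block as a sketch rather than the verbatim file:

static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;

	/* slice exhausted: reschedule and strip any buddy favour */
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		clear_buddies(cfs_rq, curr);
		return;
	}

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	/* never preempt before curr has run a minimum granularity */
	if (delta_exec < sysctl_sched_min_granularity)
		return;

	/*
	 * New catch-up test: if the leftmost entity's vruntime lags
	 * curr's by more than a full slice, make up the missed wakeup
	 * preemption now instead of waiting out the slice.
	 */
	if (cfs_rq->nr_running > 1) {
		struct sched_entity *se = __pick_next_entity(cfs_rq);
		s64 delta = curr->vruntime - se->vruntime;

		if (delta > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
}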
@@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *se = __pick_next_entity(cfs_rq);
+	struct sched_entity *left = se;
 
-	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
-		return cfs_rq->next;
+	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+		se = cfs_rq->next;
 
-	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
-		return cfs_rq->last;
+	/*
+	 * Prefer last buddy, try to return the CPU to a preempted task.
+	 */
+	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
+		se = cfs_rq->last;
+
+	clear_buddies(cfs_rq, se);
 
 	return se;
 }
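Three things change here at once: both buddy candidates are now compared against the cached leftmost entity (left) instead of a possibly already-updated se; the last buddy outranks the next buddy because it is tested second and overwrites se; and whichever entity wins has its buddy status cleared immediately. An annotated rendering of the post-merge function (comments are editorial, not in the source):

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	/* leftmost entity in the rbtree, i.e. smallest vruntime */
	struct sched_entity *se = __pick_next_entity(cfs_rq);
	struct sched_entity *left = se;

	/* next buddy: the freshly woken task, if close enough to left */
	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	/* last buddy outranks next: hand the CPU back to the preemptee */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	/* whichever entity we picked must earn any buddy favour again */
	clear_buddies(cfs_rq, se);

	return se;
}

A buddy qualifies when wakeup_preempt_entity(buddy, left) < 1, i.e. its vruntime is within one wakeup granularity of the leftmost entity's, so favouring it costs only bounded short-term unfairness.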
@@ -1623,6 +1649,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int sync = wake_flags & WF_SYNC;
+	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
 	update_curr(cfs_rq);
 
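The new scale flag makes the buddy paths below load-dependent. sched_nr_latency is, conceptually, how many minimum-granularity slices fit into one scheduling-latency window, and the buddy logic only engages once the runqueue holds at least that many tasks, which is when buddy-induced latencies and missed wakeup preemptions start to hurt. A minimal sketch of the relationship, with the default quoted from memory of this era's tree (treat it as an assumption):

	/* editorial sketch, not the in-tree definition */
	sched_nr_latency = sysctl_sched_latency / sysctl_sched_min_granularity;
	/* default 5 here (assumed), so scale == (cfs_rq->nr_running >= 5) */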
@@ -1637,18 +1664,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	if (unlikely(se == pse))
 		return;
 
-	/*
-	 * Only set the backward buddy when the current task is still on the
-	 * rq. This can happen when a wakeup gets interleaved with schedule on
-	 * the ->pre_schedule() or idle_balance() point, either of which can
-	 * drop the rq lock.
-	 *
-	 * Also, during early boot the idle thread is in the fair class, for
-	 * obvious reasons its a bad idea to schedule back to the idle thread.
-	 */
-	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
-		set_last_buddy(se);
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
+	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
 		set_next_buddy(pse);
 
 	/*
@@ -1694,8 +1710,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 	BUG_ON(!pse);
 
-	if (wakeup_preempt_entity(se, pse) == 1)
+	if (wakeup_preempt_entity(se, pse) == 1) {
 		resched_task(curr);
+		/*
+		 * Only set the backward buddy when the current task is still
+		 * on the rq. This can happen when a wakeup gets interleaved
+		 * with schedule on the ->pre_schedule() or idle_balance()
+		 * point, either of which can drop the rq lock.
+		 *
+		 * Also, during early boot the idle thread is in the fair class,
+		 * for obvious reasons its a bad idea to schedule back to it.
+		 */
+		if (unlikely(!se->on_rq || curr == rq->idle))
+			return;
+		if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+			set_last_buddy(se);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
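Both this hunk and the pick_next_entity() change earlier key off wakeup_preempt_entity(), which the diff never shows. In kernels of this vintage it reads roughly as the sketch below, reconstructed from the 2.6.32-era source (the wakeup_gran() call and its signature in particular are an assumption):

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	/* curr is still leftmost of the two: no preemption */
	if (vdiff <= 0)
		return -1;

	/* se leads curr by more than the wakeup granularity: preempt */
	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	/* se leads, but only just: leave curr alone */
	return 0;
}

So a return of 1 here means the woken task's vruntime is smaller than the running task's by more than the wakeup granularity; only then is curr rescheduled and, with LAST_BUDDY enabled and the queue busy enough (scale), remembered so pick_next_entity() can hand the CPU straight back to it. Note also that the on_rq/idle comment and checks did not disappear from the earlier hunk: they moved here, now applied only after a preemption was actually issued.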
@@ -1709,16 +1739,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
-		/*
-		 * If se was a buddy, clear it so that it will have to earn
-		 * the favour again.
-		 *
-		 * If se was not a buddy, clear the buddies because neither
-		 * was elegible to run, let them earn it again.
-		 *
-		 * IOW. unconditionally clear buddies.
-		 */
-		__clear_buddies(cfs_rq, NULL);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
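This removal is the counterpart of the pick_next_entity() hunk above: buddy status is now cleared inside pick_next_entity() on the entity actually selected, so the unconditional __clear_buddies() in this loop became redundant. What remains is a plain walk down the group hierarchy (comments are editorial):

	do {
		/* picks the winner; clears its buddy status as a side effect */
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		/* descend into the group's own runqueue, if se is a group */
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);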