aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
authorMike Galbraith <efault@gmx.de>2011-02-01 09:50:51 -0500
committerIngo Molnar <mingo@elte.hu>2011-02-03 08:20:33 -0500
commitd95f412200652694e63e64bfd49f0ae274a54479 (patch)
treed2abb7b2c635bbc9f344b45051b6186b1317718c /kernel/sched_fair.c
parentac53db596cc08ecb8040cfb6f71ae40c6f2041c4 (diff)
sched: Add yield_to(task, preempt) functionality
Currently only implemented for fair class tasks. Add a yield_to_task method to the fair scheduling class, allowing the caller of yield_to() to accelerate another thread in its thread group or task group. Implemented via a scheduler hint, using cfs_rq->next to encourage the target being selected. We can rely on pick_next_entity to keep things fair, so no one can accelerate a thread that has already used its fair share of CPU time. This also means callers should only call yield_to when they really mean it. Calling it too often can result in the scheduler just ignoring the hint. Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <20110201095051.4ddb7738@annuminas.surriel.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--kernel/sched_fair.c20
1 files changed, 20 insertions, 0 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c0fbeb992833..027024694043 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1975,6 +1975,25 @@ static void yield_task_fair(struct rq *rq)
1975 set_skip_buddy(se); 1975 set_skip_buddy(se);
1976} 1976}
1977 1977
1978static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
1979{
1980 struct sched_entity *se = &p->se;
1981
1982 if (!se->on_rq)
1983 return false;
1984
1985 /* Tell the scheduler that we'd really like pse to run next. */
1986 set_next_buddy(se);
1987
1988 /* Make p's CPU reschedule; pick_next_entity takes care of fairness. */
1989 if (preempt)
1990 resched_task(rq->curr);
1991
1992 yield_task_fair(rq);
1993
1994 return true;
1995}
1996
1978#ifdef CONFIG_SMP 1997#ifdef CONFIG_SMP
1979/************************************************** 1998/**************************************************
1980 * Fair scheduling class load-balancing methods: 1999 * Fair scheduling class load-balancing methods:
@@ -4243,6 +4262,7 @@ static const struct sched_class fair_sched_class = {
4243 .enqueue_task = enqueue_task_fair, 4262 .enqueue_task = enqueue_task_fair,
4244 .dequeue_task = dequeue_task_fair, 4263 .dequeue_task = dequeue_task_fair,
4245 .yield_task = yield_task_fair, 4264 .yield_task = yield_task_fair,
4265 .yield_to_task = yield_to_task_fair,
4246 4266
4247 .check_preempt_curr = check_preempt_wakeup, 4267 .check_preempt_curr = check_preempt_wakeup,
4248 4268