author    Mike Galbraith <efault@gmx.de>    2011-02-01 09:50:51 -0500
committer Ingo Molnar <mingo@elte.hu>       2011-02-03 08:20:33 -0500
commit    d95f412200652694e63e64bfd49f0ae274a54479 (patch)
tree      d2abb7b2c635bbc9f344b45051b6186b1317718c
parent    ac53db596cc08ecb8040cfb6f71ae40c6f2041c4 (diff)
sched: Add yield_to(task, preempt) functionality
Currently only implemented for fair class tasks.

Add a yield_to_task() method to the fair scheduling class, allowing the
caller of yield_to() to accelerate another thread in its thread group or
task group.

Implemented via a scheduler hint, using cfs_rq->next to encourage the
target being selected. We can rely on pick_next_entity() to keep things
fair, so no one can accelerate a thread that has already used its fair
share of CPU time.

This also means callers should only call yield_to() when they really
mean it: calling it too often can result in the scheduler simply
ignoring the hint.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20110201095051.4ddb7738@annuminas.surriel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/linux/sched.h   2
-rw-r--r--  kernel/sched.c         85
-rw-r--r--  kernel/sched_fair.c    20
3 files changed, 107 insertions, 0 deletions
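Before the diff itself, a rough illustration of the intended use. This
sketch is not part of the commit; the helper name and its policy are
hypothetical, and only yield_to() and yield() are real kernel symbols.
The caller must pin the target task (e.g. with get_task_struct()), as
required by the comment above yield_to() in the patch below.

	/*
	 * Hypothetical caller sketch: boost a sibling thread that is
	 * believed to hold a lock we would otherwise spin on.
	 */
	static void boost_lock_holder(struct task_struct *target)
	{
		/*
		 * Ask the scheduler to run "target" next on its CPU,
		 * preempting whatever runs there now.  If the hint is
		 * not accepted (wrong sched class, target not runnable,
		 * or it already used its fair share of CPU time), fall
		 * back to an ordinary yield.
		 */
		if (!yield_to(target, true))
			yield();
	}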
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4e9fad271c30..c88b3bfbd09e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1058,6 +1058,7 @@ struct sched_class {
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
+	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
@@ -1972,6 +1973,7 @@ static inline int rt_mutex_getprio(struct task_struct *p)
 # define rt_mutex_adjust_pi(p)		do { } while (0)
 #endif
 
+extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
 extern int task_nice(const struct task_struct *p);
diff --git a/kernel/sched.c b/kernel/sched.c
index ae5e1a19b9d6..2effcb71a478 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1686,6 +1686,39 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__release(rq2->lock);
 }
 
+#else /* CONFIG_SMP */
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static void double_rq_lock(struct rq *rq1, struct rq *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	BUG_ON(!irqs_disabled());
+	BUG_ON(rq1 != rq2);
+	raw_spin_lock(&rq1->lock);
+	__acquire(rq2->lock);	/* Fake it out ;) */
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+	__releases(rq1->lock)
+	__releases(rq2->lock)
+{
+	BUG_ON(rq1 != rq2);
+	raw_spin_unlock(&rq1->lock);
+	__release(rq2->lock);
+}
+
 #endif
 
 static void calc_load_account_idle(struct rq *this_rq);
@@ -5448,6 +5481,58 @@ void __sched yield(void)
 }
 EXPORT_SYMBOL(yield);
 
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * Returns true if we indeed boosted the target task.
+ */
+bool __sched yield_to(struct task_struct *p, bool preempt)
+{
+	struct task_struct *curr = current;
+	struct rq *rq, *p_rq;
+	unsigned long flags;
+	bool yielded = 0;
+
+	local_irq_save(flags);
+	rq = this_rq();
+
+again:
+	p_rq = task_rq(p);
+	double_rq_lock(rq, p_rq);
+	while (task_rq(p) != p_rq) {
+		double_rq_unlock(rq, p_rq);
+		goto again;
+	}
+
+	if (!curr->sched_class->yield_to_task)
+		goto out;
+
+	if (curr->sched_class != p->sched_class)
+		goto out;
+
+	if (task_running(p_rq, p) || p->state)
+		goto out;
+
+	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
+	if (yielded)
+		schedstat_inc(rq, yld_count);
+
+out:
+	double_rq_unlock(rq, p_rq);
+	local_irq_restore(flags);
+
+	if (yielded)
+		schedule();
+
+	return yielded;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
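The again: loop in yield_to() deserves a word: p can be migrated to
another CPU between the unlocked task_rq(p) lookup and the moment both
runqueue locks are taken, so the lookup must be retried until it is
stable under the locks. A distilled, commented restatement of that
idiom (illustration only, kernel context assumed; the helper name is
hypothetical):

	static void lock_current_and_target_rq(struct rq *rq,
					       struct task_struct *p,
					       struct rq **p_rq)
	{
	again:
		*p_rq = task_rq(p);		/* unlocked snapshot */
		double_rq_lock(rq, *p_rq);	/* lock both runqueues */
		if (task_rq(p) != *p_rq) {	/* p migrated meanwhile? */
			double_rq_unlock(rq, *p_rq);
			goto again;		/* re-read and relock */
		}
		/* p is now pinned to *p_rq until double_rq_unlock() */
	}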
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c0fbeb992833..027024694043 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1975,6 +1975,25 @@ static void yield_task_fair(struct rq *rq)
 	set_skip_buddy(se);
 }
 
+static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
+{
+	struct sched_entity *se = &p->se;
+
+	if (!se->on_rq)
+		return false;
+
+	/* Tell the scheduler that we'd really like pse to run next. */
+	set_next_buddy(se);
+
+	/* Make p's CPU reschedule; pick_next_entity takes care of fairness. */
+	if (preempt)
+		resched_task(rq->curr);
+
+	yield_task_fair(rq);
+
+	return true;
+}
+
 #ifdef CONFIG_SMP
 /**************************************************
  * Fair scheduling class load-balancing methods:
@@ -4243,6 +4262,7 @@ static const struct sched_class fair_sched_class = {
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
+	.yield_to_task		= yield_to_task_fair,
 
 	.check_preempt_curr	= check_preempt_wakeup,
 
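For context, the fairness guarantee the changelog relies on lives in
pick_next_entity(): the next-buddy hint set by yield_to_task_fair() is
honoured only when running the buddy would not be grossly unfair to the
leftmost (most entitled) entity. A simplified sketch of that era's
logic, omitting the last/skip buddy handling; approximate and not part
of this patch:

	static struct sched_entity *pick_next_entity_sketch(struct cfs_rq *cfs_rq)
	{
		struct sched_entity *left = __pick_next_entity(cfs_rq);
		struct sched_entity *se = left;

		/*
		 * Take the next-buddy hint only if it would not be too
		 * unfair to the leftmost entity; this check is what
		 * stops yield_to() from being abused to steal CPU time.
		 */
		if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
			se = cfs_rq->next;

		return se;
	}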