| field | value | date |
|---|---|---|
| author | Mike Galbraith <efault@gmx.de> | 2011-02-01 09:50:51 -0500 |
| committer | Ingo Molnar <mingo@elte.hu> | 2011-02-03 08:20:33 -0500 |
| commit | d95f412200652694e63e64bfd49f0ae274a54479 | |
| tree | d2abb7b2c635bbc9f344b45051b6186b1317718c /kernel | |
| parent | ac53db596cc08ecb8040cfb6f71ae40c6f2041c4 | |
sched: Add yield_to(task, preempt) functionality
Currently only implemented for fair class tasks.
Add a yield_to_task() method to the fair scheduling class, allowing the
caller of yield_to() to accelerate another thread in its thread group or
task group.
This is implemented via a scheduler hint: cfs_rq->next is set to encourage
selection of the target. We can rely on pick_next_entity() to keep things
fair, so no one can accelerate a thread that has already used its fair
share of CPU time.
This also means callers should only call yield_to() when they really
mean it; calling it too often can result in the scheduler simply
ignoring the hint (see the usage sketch after the sign-offs below).
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20110201095051.4ddb7738@annuminas.surriel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
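For illustration, here is a minimal, hedged sketch of how an in-kernel caller might use the new yield_to() API. The scenario (boosting a known lock-holder task) and the function name example_boost_lock_holder() are assumptions made for this example and are not part of the patch; only yield_to(), get_task_struct() and put_task_struct() are real kernel interfaces.

```c
#include <linux/sched.h>
#include <linux/printk.h>

/*
 * Illustrative sketch only -- not part of this patch. A hypothetical
 * in-kernel user that wants a specific runnable task (for instance a
 * lock holder it is waiting on) to get the CPU sooner.
 */
static void example_boost_lock_holder(struct task_struct *holder)
{
	bool boosted;

	/* Per the yield_to() kerneldoc in the patch below: pin the task
	 * so it cannot go away while we poke at it. */
	get_task_struct(holder);

	/*
	 * Hand the CPU toward 'holder'; preempt=true also reschedules
	 * whatever currently runs on holder's CPU. This is only a hint:
	 * it returns false if the target is already running, not
	 * runnable, or uses a different scheduling class.
	 */
	boosted = yield_to(holder, true);

	put_task_struct(holder);

	if (!boosted)
		pr_debug("yield_to hint declined by the scheduler\n");
}
```

As the commit message notes, a false return should be treated as the scheduler declining the hint, not as a signal to retry in a tight loop.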
Diffstat (limited to 'kernel')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched.c | 85 |
| -rw-r--r-- | kernel/sched_fair.c | 20 |

2 files changed, 105 insertions, 0 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index ae5e1a19b9d..2effcb71a47 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1686,6 +1686,39 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__release(rq2->lock);
 }
 
+#else /* CONFIG_SMP */
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static void double_rq_lock(struct rq *rq1, struct rq *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	BUG_ON(!irqs_disabled());
+	BUG_ON(rq1 != rq2);
+	raw_spin_lock(&rq1->lock);
+	__acquire(rq2->lock);	/* Fake it out ;) */
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+	__releases(rq1->lock)
+	__releases(rq2->lock)
+{
+	BUG_ON(rq1 != rq2);
+	raw_spin_unlock(&rq1->lock);
+	__release(rq2->lock);
+}
+
 #endif
 
 static void calc_load_account_idle(struct rq *this_rq);
@@ -5448,6 +5481,58 @@ void __sched yield(void)
 }
 EXPORT_SYMBOL(yield);
 
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * Returns true if we indeed boosted the target task.
+ */
+bool __sched yield_to(struct task_struct *p, bool preempt)
+{
+	struct task_struct *curr = current;
+	struct rq *rq, *p_rq;
+	unsigned long flags;
+	bool yielded = 0;
+
+	local_irq_save(flags);
+	rq = this_rq();
+
+again:
+	p_rq = task_rq(p);
+	double_rq_lock(rq, p_rq);
+	while (task_rq(p) != p_rq) {
+		double_rq_unlock(rq, p_rq);
+		goto again;
+	}
+
+	if (!curr->sched_class->yield_to_task)
+		goto out;
+
+	if (curr->sched_class != p->sched_class)
+		goto out;
+
+	if (task_running(p_rq, p) || p->state)
+		goto out;
+
+	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
+	if (yielded)
+		schedstat_inc(rq, yld_count);
+
+out:
+	double_rq_unlock(rq, p_rq);
+	local_irq_restore(flags);
+
+	if (yielded)
+		schedule();
+
+	return yielded;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
```
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c0fbeb99283..02702469404 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1975,6 +1975,25 @@ static void yield_task_fair(struct rq *rq)
 	set_skip_buddy(se);
 }
 
+static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
+{
+	struct sched_entity *se = &p->se;
+
+	if (!se->on_rq)
+		return false;
+
+	/* Tell the scheduler that we'd really like pse to run next. */
+	set_next_buddy(se);
+
+	/* Make p's CPU reschedule; pick_next_entity takes care of fairness. */
+	if (preempt)
+		resched_task(rq->curr);
+
+	yield_task_fair(rq);
+
+	return true;
+}
+
 #ifdef CONFIG_SMP
 /**************************************************
  * Fair scheduling class load-balancing methods:
@@ -4243,6 +4262,7 @@ static const struct sched_class fair_sched_class = {
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
+	.yield_to_task		= yield_to_task_fair,
 
 	.check_preempt_curr	= check_preempt_wakeup,
 
```
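To connect the hook above back to the fairness claim in the commit message, the following is a simplified, hedged sketch of the gate that pick_next_entity() applies to the ->next buddy set by yield_to_task_fair(). The helper name pick_next_buddy_sketch() is invented for illustration; the real logic lives inside pick_next_entity() in kernel/sched_fair.c, alongside the last and skip buddies.

```c
/*
 * Simplified sketch -- not the actual kernel code. The ->next buddy is
 * only honored when picking it over the leftmost (most entitled) entity
 * would not be too unfair, which is what prevents a task from being
 * boosted past its fair share of CPU time.
 */
static struct sched_entity *
pick_next_buddy_sketch(struct cfs_rq *cfs_rq, struct sched_entity *left)
{
	struct sched_entity *se = left;

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	return se;
}
```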
