author    Peter Zijlstra <peterz@infradead.org>  2013-01-22 02:39:13 -0500
committer Gleb Natapov <gleb@redhat.com>  2013-01-29 08:38:37 -0500
commit    7b270f609982f68f2433442bf167f735e7364b06 (patch)
tree      07430a425125c1938fad3befc6debec667310ec4 /kernel/sched/core.c
parent    c7c9c56ca26f7b9458711b2d78b60b60e0d38ba7 (diff)
sched: Bail out of yield_to when source and target runqueues have one task
In undercommitted scenarios, especially in large guests, the yield_to overhead is significantly high. When the run queue length of both source and target is one, take the opportunity to bail out and return -ESRCH. This return condition can be further exploited to quickly come out of the PLE handler.

(History: Raghavendra initially worked on breaking out of the KVM PLE handler upon seeing source run queue length = 1, but that required exporting the rq length. Peter came up with the elegant idea of returning -ESRCH from the scheduler core.)

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
[Raghavendra: also check the rq length of the target vcpu (thanks Avi)]
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Andrew Jones <drjones@redhat.com>
Tested-by: Chegu Vinod <chegu_vinod@hp.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
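The point of the -ESRCH return is best seen from the caller's side: a pause-loop-exit handler probing candidate vCPUs can stop scanning as soon as the scheduler reports there is nothing to yield to. Below is a minimal kernel-style sketch of that caller pattern, not the actual KVM code; ple_handler_sketch(), candidates, and nr are hypothetical stand-ins for KVM's vCPU iteration, and it assumes the yield_to() result is propagated as an int (as the kernel-doc added in this patch intends).

	/* Hedged sketch of a PLE-style caller; not the real kvm_vcpu_on_spin(). */
	static void ple_handler_sketch(struct task_struct **candidates, int nr)
	{
		int i, ret;

		for (i = 0; i < nr; i++) {
			ret = yield_to(candidates[i], false);
			if (ret > 0)
				break;	/* boosted a target; our slice was donated */
			if (ret == -ESRCH)
				break;	/* both rqs have one task: undercommitted, bail */
			/* ret == 0: this candidate was not eligible, try the next */
		}
	}

The -ESRCH case is what lets the handler quit the whole scan early instead of probing every remaining candidate in an undercommitted guest.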
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0533496b6228..01edad9b5d71 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4316,7 +4316,10 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns true if we indeed boosted the target task.
+ * Returns:
+ *	true (>0) if we indeed boosted the target task.
+ *	false (0) if we failed to boost the target.
+ *	-ESRCH if there's no task to yield to.
  */
 bool __sched yield_to(struct task_struct *p, bool preempt)
 {
@@ -4330,6 +4333,15 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 
 again:
 	p_rq = task_rq(p);
+	/*
+	 * If we're the only runnable task on the rq and target rq also
+	 * has only one task, there's absolutely no point in yielding.
+	 */
+	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
+		yielded = -ESRCH;
+		goto out_irq;
+	}
+
 	double_rq_lock(rq, p_rq);
 	while (task_rq(p) != p_rq) {
 		double_rq_unlock(rq, p_rq);
@@ -4337,13 +4349,13 @@ again:
 	}
 
 	if (!curr->sched_class->yield_to_task)
-		goto out;
+		goto out_unlock;
 
 	if (curr->sched_class != p->sched_class)
-		goto out;
+		goto out_unlock;
 
 	if (task_running(p_rq, p) || p->state)
-		goto out;
+		goto out_unlock;
 
 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
 	if (yielded) {
@@ -4356,11 +4368,12 @@ again:
 		resched_task(p_rq->curr);
 	}
 
-out:
+out_unlock:
 	double_rq_unlock(rq, p_rq);
+out_irq:
 	local_irq_restore(flags);
 
-	if (yielded)
+	if (yielded > 0)
 		schedule();
 
 	return yielded;
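For readers who prefer the result to the delta, here is a condensed sketch of yield_to()'s control flow after this patch. It is abridged: the local declarations and the schedstat accounting around resched_task() are reconstructed assumptions, not part of this diff; only the hunks above are authoritative.

	/* Abridged post-patch control flow of yield_to(); a sketch, not the full source. */
	bool __sched yield_to(struct task_struct *p, bool preempt)
	{
		struct task_struct *curr = current;
		struct rq *rq, *p_rq;
		unsigned long flags;
		int yielded = 0;

		local_irq_save(flags);
		rq = this_rq();
	again:
		p_rq = task_rq(p);
		/* New fast path: both rqs single-task, so nothing to yield to. */
		if (rq->nr_running == 1 && p_rq->nr_running == 1) {
			yielded = -ESRCH;
			goto out_irq;
		}
		double_rq_lock(rq, p_rq);
		while (task_rq(p) != p_rq) {	/* p migrated under us; retry */
			double_rq_unlock(rq, p_rq);
			goto again;
		}
		if (!curr->sched_class->yield_to_task ||
		    curr->sched_class != p->sched_class ||
		    task_running(p_rq, p) || p->state)
			goto out_unlock;
		yielded = curr->sched_class->yield_to_task(rq, p, preempt);
		if (yielded)
			resched_task(p_rq->curr);
	out_unlock:
		double_rq_unlock(rq, p_rq);
	out_irq:
		local_irq_restore(flags);
		if (yielded > 0)	/* only an actual boost triggers a reschedule */
			schedule();
		return yielded;
	}

The new out_irq label is what makes the fast path cheap: the undercommitted check runs before double_rq_lock(), so the single-task-on-both-rqs case never takes either runqueue lock.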