aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-02-24 16:07:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-24 16:07:18 -0500
commit89f883372fa60f604d136924baf3e89ff1870e9e (patch)
treecb69b0a14957945ba00d3d392bf9ccbbef56f3b8 /kernel
parent9e2d59ad580d590134285f361a0e80f0e98c0207 (diff)
parent6b73a96065e89dc9fa75ba4f78b1aa3a3bbd0470 (diff)
Merge tag 'kvm-3.9-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Marcelo Tosatti: "KVM updates for the 3.9 merge window, including x86 real mode emulation fixes, stronger memory slot interface restrictions, mmu_lock spinlock hold time reduction, improved handling of large page faults on shadow, initial APICv HW acceleration support, s390 channel IO based virtio, amongst others" * tag 'kvm-3.9-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (143 commits) Revert "KVM: MMU: lazily drop large spte" x86: pvclock kvm: align allocation size to page size KVM: nVMX: Remove redundant get_vmcs12 from nested_vmx_exit_handled_msr x86 emulator: fix parity calculation for AAD instruction KVM: PPC: BookE: Handle alignment interrupts booke: Added DBCR4 SPR number KVM: PPC: booke: Allow multiple exception types KVM: PPC: booke: use vcpu reference from thread_struct KVM: Remove user_alloc from struct kvm_memory_slot KVM: VMX: disable apicv by default KVM: s390: Fix handling of iscs. KVM: MMU: cleanup __direct_map KVM: MMU: remove pt_access in mmu_set_spte KVM: MMU: cleanup mapping-level KVM: MMU: lazily drop large spte KVM: VMX: cleanup vmx_set_cr0(). KVM: VMX: add missing exit names to VMX_EXIT_REASONS array KVM: VMX: disable SMEP feature when guest is in non-paging mode KVM: Remove duplicate text in api.txt Revert "KVM: MMU: split kvm_mmu_free_page" ...
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/core.c25
1 file changed, 19 insertions, 6 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 053dfd7692d1..f1bdecf09afb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4374,7 +4374,10 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns true if we indeed boosted the target task.
+ * Returns:
+ *	true (>0) if we indeed boosted the target task.
+ *	false (0) if we failed to boost the target.
+ *	-ESRCH if there's no task to yield to.
  */
 bool __sched yield_to(struct task_struct *p, bool preempt)
 {
@@ -4388,6 +4391,15 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 
 again:
 	p_rq = task_rq(p);
+	/*
+	 * If we're the only runnable task on the rq and target rq also
+	 * has only one task, there's absolutely no point in yielding.
+	 */
+	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
+		yielded = -ESRCH;
+		goto out_irq;
+	}
+
 	double_rq_lock(rq, p_rq);
 	while (task_rq(p) != p_rq) {
 		double_rq_unlock(rq, p_rq);
@@ -4395,13 +4407,13 @@ again:
 	}
 
 	if (!curr->sched_class->yield_to_task)
-		goto out;
+		goto out_unlock;
 
 	if (curr->sched_class != p->sched_class)
-		goto out;
+		goto out_unlock;
 
 	if (task_running(p_rq, p) || p->state)
-		goto out;
+		goto out_unlock;
 
 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
 	if (yielded) {
@@ -4414,11 +4426,12 @@ again:
 		resched_task(p_rq->curr);
 	}
 
-out:
+out_unlock:
 	double_rq_unlock(rq, p_rq);
+out_irq:
 	local_irq_restore(flags);
 
-	if (yielded)
+	if (yielded > 0)
 		schedule();
 
 	return yielded;