author		Rik van Riel <riel@redhat.com>	2011-02-01 09:53:28 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2011-03-17 12:08:29 -0400
commit		217ece6129f2d3b4fdd18d9e79be9e43d8d14a42 (patch)
tree		b2bbd81042f48e862e0c0b8743edc932102640bb /virt/kvm/kvm_main.c
parent		34bb10b79de7df118de832f6832efb630e646577 (diff)
KVM: use yield_to instead of sleep in kvm_vcpu_on_spin
Instead of sleeping in kvm_vcpu_on_spin, which can cause gigantic
slowdowns of certain workloads, use yield_to to get another VCPU
in the same KVM guest to run sooner.

This seems to give a 10-15% speedup in certain workloads.
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
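For readers unfamiliar with directed yield: the core of the new approach is to resolve a candidate VCPU's struct pid to its backing task and donate the spinning VCPU's timeslice to it with yield_to(). The helper below is a minimal sketch of just that step; boost_one_vcpu_task() is a hypothetical name and not part of the patch, while get_pid_task(), yield_to(), PF_VCPU, and put_task_struct() are real kernel interfaces. The actual logic in the diff additionally approximates round-robin ordering and skips VCPUs that are sleeping or already running guest code.

	#include <linux/pid.h>
	#include <linux/sched.h>

	/*
	 * Minimal sketch, not the patch itself: try a directed yield to the
	 * task backing one candidate VCPU.  The helper name is hypothetical.
	 */
	static bool boost_one_vcpu_task(struct pid *pid)
	{
		struct task_struct *task;
		bool yielded = false;

		task = get_pid_task(pid, PIDTYPE_PID);	/* takes a task reference */
		if (!task)
			return false;

		/* PF_VCPU means the task is already executing guest code; skip it. */
		if (!(task->flags & PF_VCPU))
			yielded = yield_to(task, 1);	/* donate our timeslice */

		put_task_struct(task);			/* drop the reference */
		return yielded;
	}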
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	57
1 file changed, 47 insertions(+), 10 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bc8bfd15ab71..2dc53a6dc285 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1484,18 +1484,55 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
-	ktime_t expires;
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-
-	/* Sleep for 100 us, and hope lock-holder got scheduled */
-	expires = ktime_add_ns(ktime_get(), 100000UL);
-	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+	struct kvm *kvm = me->kvm;
+	struct kvm_vcpu *vcpu;
+	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+	int yielded = 0;
+	int pass;
+	int i;
 
-	finish_wait(&vcpu->wq, &wait);
+	/*
+	 * We boost the priority of a VCPU that is runnable but not
+	 * currently running, because it got preempted by something
+	 * else and called schedule in __vcpu_run.  Hopefully that
+	 * VCPU is holding the lock that we need and will release it.
+	 * We approximate round-robin by starting at the last boosted VCPU.
+	 */
+	for (pass = 0; pass < 2 && !yielded; pass++) {
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			struct task_struct *task = NULL;
+			struct pid *pid;
+			if (!pass && i < last_boosted_vcpu) {
+				i = last_boosted_vcpu;
+				continue;
+			} else if (pass && i > last_boosted_vcpu)
+				break;
+			if (vcpu == me)
+				continue;
+			if (waitqueue_active(&vcpu->wq))
+				continue;
+			rcu_read_lock();
+			pid = rcu_dereference(vcpu->pid);
+			if (pid)
+				task = get_pid_task(vcpu->pid, PIDTYPE_PID);
+			rcu_read_unlock();
+			if (!task)
+				continue;
+			if (task->flags & PF_VCPU) {
+				put_task_struct(task);
+				continue;
+			}
+			if (yield_to(task, 1)) {
+				put_task_struct(task);
+				kvm->last_boosted_vcpu = i;
+				yielded = 1;
+				break;
+			}
+			put_task_struct(task);
+		}
+	}
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
 
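For context (not part of this change): kvm_vcpu_on_spin() is reached from the architecture's spin-detection exit handler. On Intel VMX with PAUSE-loop exiting, the handler of that era looked roughly like the sketch below, simplified from the contemporaneous arch/x86/kvm/vmx.c; treat the details as illustrative rather than authoritative.

	/*
	 * Rough sketch of the VMX PAUSE-exit handler that calls into
	 * kvm_vcpu_on_spin(); shown for context only.
	 */
	static int handle_pause(struct kvm_vcpu *vcpu)
	{
		skip_emulated_instruction(vcpu);	/* move RIP past the PAUSE */
		kvm_vcpu_on_spin(vcpu);			/* try to boost a likely lock holder */

		return 1;				/* continue the VCPU run loop */
	}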