| field | value | date |
|---|---|---|
| author | Konstantin Weitz <WEITZKON@de.ibm.com> | 2012-04-25 09:30:38 -0400 |
| committer | Marcelo Tosatti <mtosatti@redhat.com> | 2012-04-30 20:38:31 -0400 |
| commit | 41628d334361670d825fb03c04568f5ef9f084dc | |
| tree | 88b2eadd0f7de12f1d8b226e4491532b8783ee94 /virt | |
| parent | b6ddf05ff68d81a7c1736717faf492b70e9bf4f9 | |
KVM: s390: Implement the directed yield (diag 9c) hypervisor call for KVM
This patch implements the directed yield hypercall found on other
System z hypervisors. It delegates execution time to the virtual cpu
specified in the instruction's parameter.
This is useful for avoiding long spinlock waits in the guest.
Christian Borntraeger: moved common code into virt/kvm/
Signed-off-by: Konstantin Weitz <WEITZKON@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
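
To make the commit message concrete, here is a minimal, hedged sketch of how an arch-side DIAGNOSE 0x9c handler could feed the new common helper. The s390 handler itself is outside this virt/-limited diff, and `handle_diag_9c()` together with its `target_cpu_addr` parameter are hypothetical illustrations; only `kvm_for_each_vcpu()` and the `kvm_vcpu_yield_to()` introduced below come from the patch.

```c
#include <linux/kvm_host.h>

/*
 * Hedged sketch, not the actual arch/s390 code (the arch half of the
 * series is not shown in this virt/-limited diff).  target_cpu_addr
 * stands in for the CPU address the guest passed with DIAGNOSE 0x9c.
 */
static int handle_diag_9c(struct kvm_vcpu *vcpu, u16 target_cpu_addr)
{
        struct kvm_vcpu *tcpu;
        int i;

        /* Find the vcpu the guest wants to donate its timeslice to ... */
        kvm_for_each_vcpu(i, tcpu, vcpu->kvm) {
                if (tcpu->vcpu_id == target_cpu_addr) {
                        /* ... and yield to that vcpu's host task. */
                        kvm_vcpu_yield_to(tcpu);
                        break;
                }
        }
        return 0;
}
```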
Diffstat (limited to 'virt')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | virt/kvm/kvm_main.c | 42 |

1 file changed, 26 insertions, 16 deletions
```diff
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1847c762d8d9..7e140683ff14 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1543,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+{
+        struct pid *pid;
+        struct task_struct *task = NULL;
+
+        rcu_read_lock();
+        pid = rcu_dereference(target->pid);
+        if (pid)
+                task = get_pid_task(target->pid, PIDTYPE_PID);
+        rcu_read_unlock();
+        if (!task)
+                return false;
+        if (task->flags & PF_VCPU) {
+                put_task_struct(task);
+                return false;
+        }
+        if (yield_to(task, 1)) {
+                put_task_struct(task);
+                return true;
+        }
+        put_task_struct(task);
+        return false;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
         struct kvm *kvm = me->kvm;
@@ -1561,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
          */
         for (pass = 0; pass < 2 && !yielded; pass++) {
                 kvm_for_each_vcpu(i, vcpu, kvm) {
-                        struct task_struct *task = NULL;
-                        struct pid *pid;
                         if (!pass && i < last_boosted_vcpu) {
                                 i = last_boosted_vcpu;
                                 continue;
@@ -1572,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
                                 continue;
                         if (waitqueue_active(&vcpu->wq))
                                 continue;
-                        rcu_read_lock();
-                        pid = rcu_dereference(vcpu->pid);
-                        if (pid)
-                                task = get_pid_task(vcpu->pid, PIDTYPE_PID);
-                        rcu_read_unlock();
-                        if (!task)
-                                continue;
-                        if (task->flags & PF_VCPU) {
-                                put_task_struct(task);
-                                continue;
-                        }
-                        if (yield_to(task, 1)) {
-                                put_task_struct(task);
+                        if (kvm_vcpu_yield_to(vcpu)) {
                                 kvm->last_boosted_vcpu = i;
                                 yielded = 1;
                                 break;
                         }
-                        put_task_struct(task);
                 }
         }
 }
```
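
The refactoring keeps kvm_vcpu_on_spin()'s round-robin boosting heuristic intact and only moves the pid-to-task yield logic into kvm_vcpu_yield_to(), so the s390 diag 9c path can target one specific vcpu. As a reading aid, here is a stripped-down, hedged sketch of that reference pattern; `example_yield_to_pid()` and its `pidp` argument are hypothetical (in KVM the pointer is `vcpu->pid`), and the sketch omits the PF_VCPU check the real helper performs.

```c
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Illustrative sketch of the pid -> task reference pattern used by
 * kvm_vcpu_yield_to() above.  'pidp' is a hypothetical RCU-protected
 * struct pid pointer; the PF_VCPU short-circuit is left out here.
 */
static bool example_yield_to_pid(struct pid __rcu **pidp)
{
        struct task_struct *task = NULL;
        struct pid *pid;
        bool donated = false;

        rcu_read_lock();
        pid = rcu_dereference(*pidp);
        if (pid)
                /* get_pid_task() takes a task reference (or returns NULL). */
                task = get_pid_task(pid, PIDTYPE_PID);
        rcu_read_unlock();

        if (!task)
                return false;

        /* yield_to() reports whether the timeslice was actually donated. */
        if (yield_to(task, 1))
                donated = true;

        /* Drop the reference taken by get_pid_task() on every path. */
        put_task_struct(task);
        return donated;
}
```

Taking the task reference under rcu_read_lock() and dropping it with put_task_struct() on every exit path is what lets the helper be called safely even while the target vcpu thread is exiting.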
