aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/kvm
diff options
context:
space:
mode:
authorMichael Mueller <mimu@linux.vnet.ibm.com>2014-02-26 10:14:19 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2014-02-26 11:32:12 -0500
commit9cac38dd5dc41c943d711b96f9755a29c8b854ea (patch)
tree31aa15f442745714842aff4aba4049096af3b2d4 /arch/s390/kvm
parent98f4a14676127397c54cab7d6119537ed4d113a2 (diff)
KVM/s390: Set preempted flag during vcpu wakeup and interrupt delivery
Commit "kvm: Record the preemption status of vcpus using preempt notifiers" caused a performance regression on s390. It turned out that a formerly sleeping cpu that was woken up is not a yield candidate, since it gave up the cpu voluntarily. To retain this candidate, its preempted flag is set during wakeup and interrupt delivery time. Significant performance measurement work and code analysis to solve this issue was provided by Mao Chuan Li and his team in Beijing. Signed-off-by: Michael Mueller <mimu@linux.vnet.ibm.com> Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--arch/s390/kvm/interrupt.c3
1 files changed, 3 insertions, 0 deletions
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 1848080c3f34..fff070bd0159 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -505,6 +505,7 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
505 struct kvm_vcpu *vcpu; 505 struct kvm_vcpu *vcpu;
506 506
507 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); 507 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
508 vcpu->preempted = true;
508 tasklet_schedule(&vcpu->arch.tasklet); 509 tasklet_schedule(&vcpu->arch.tasklet);
509 510
510 return HRTIMER_NORESTART; 511 return HRTIMER_NORESTART;
@@ -732,6 +733,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
732 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 733 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
733 if (waitqueue_active(li->wq)) 734 if (waitqueue_active(li->wq))
734 wake_up_interruptible(li->wq); 735 wake_up_interruptible(li->wq);
736 kvm_get_vcpu(kvm, sigcpu)->preempted = true;
735 spin_unlock_bh(&li->lock); 737 spin_unlock_bh(&li->lock);
736unlock_fi: 738unlock_fi:
737 spin_unlock(&fi->lock); 739 spin_unlock(&fi->lock);
@@ -877,6 +879,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
877 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 879 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
878 if (waitqueue_active(&vcpu->wq)) 880 if (waitqueue_active(&vcpu->wq))
879 wake_up_interruptible(&vcpu->wq); 881 wake_up_interruptible(&vcpu->wq);
882 vcpu->preempted = true;
880 spin_unlock_bh(&li->lock); 883 spin_unlock_bh(&li->lock);
881 mutex_unlock(&vcpu->kvm->lock); 884 mutex_unlock(&vcpu->kvm->lock);
882 return 0; 885 return 0;