path: root/arch/s390
author	Thomas Huth <thuth@linux.vnet.ibm.com>	2013-09-12 04:33:44 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2013-09-24 13:12:18 -0400
commit	a76ccff6f5ed89153bf58ef4215b5512a0316877 (patch)
tree	300410874db2b3021c77f288f0e240c2e0ec4f2e /arch/s390
parent	3fb4c40f0715f382ba7092c5d9a1804d45818039 (diff)
KVM: s390: Push run loop into __vcpu_run
Moved the do-while loop from kvm_arch_vcpu_ioctl_run into __vcpu_run and the calling of kvm_handle_sie_intercept() into vcpu_post_run() (so we can add the srcu locks in a proper way in the next patch).

Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
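For readability, the following is a condensed sketch of __vcpu_run() as it stands after this patch, assembled from the hunks below; it is an excerpt for illustration, not a standalone compilable unit, and the comments are paraphrased from the patch rather than taken verbatim from the source.

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	do {
		/* Per-iteration setup before entering SIE. */
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/*
		 * As PF_VCPU will be used in the fault handler, there must be
		 * no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();

		/* vcpu_post_run() now also invokes kvm_handle_sie_intercept(). */
		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	return rc;
}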
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/kvm/kvm-s390.c	49
1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 69c7592e80d9..8eec7abc5664 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -738,6 +738,13 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 
 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
 
+	if (rc == 0) {
+		if (kvm_is_ucontrol(vcpu->kvm))
+			rc = -EOPNOTSUPP;
+		else
+			rc = kvm_handle_sie_intercept(vcpu);
+	}
+
 	return rc;
 }
 
@@ -745,21 +752,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int rc, exit_reason;
 
-	rc = vcpu_pre_run(vcpu);
-	if (rc)
-		return rc;
-
-	/*
-	 * As PF_VCPU will be used in fault handler, between guest_enter
-	 * and guest_exit should be no uaccess.
-	 */
-	preempt_disable();
-	kvm_guest_enter();
-	preempt_enable();
-	exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
-	kvm_guest_exit();
-
-	rc = vcpu_post_run(vcpu, exit_reason);
+	do {
+		rc = vcpu_pre_run(vcpu);
+		if (rc)
+			break;
+
+		/*
+		 * As PF_VCPU will be used in fault handler, between
+		 * guest_enter and guest_exit should be no uaccess.
+		 */
+		preempt_disable();
+		kvm_guest_enter();
+		preempt_enable();
+		exit_reason = sie64a(vcpu->arch.sie_block,
+				     vcpu->run->s.regs.gprs);
+		kvm_guest_exit();
+
+		rc = vcpu_post_run(vcpu, exit_reason);
+	} while (!signal_pending(current) && !rc);
 
 	return rc;
 }
@@ -801,16 +811,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	might_fault();
-
-	do {
-		rc = __vcpu_run(vcpu);
-		if (rc)
-			break;
-		if (kvm_is_ucontrol(vcpu->kvm))
-			rc = -EOPNOTSUPP;
-		else
-			rc = kvm_handle_sie_intercept(vcpu);
-	} while (!signal_pending(current) && !rc);
+	rc = __vcpu_run(vcpu);
 
 	if (signal_pending(current) && !rc) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;