author		Thomas Huth <thuth@linux.vnet.ibm.com>		2013-09-12 04:33:43 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>		2013-09-24 13:12:18 -0400
commit		3fb4c40f0715f382ba7092c5d9a1804d45818039
tree		351f2624e353e41d983c659e012283e1f19e1775
parent		6b948a7276b61ba0bb5a102bd240d2473a751506
KVM: s390: Split up __vcpu_run into three parts
In preparation for the following patch (which will change the indentation of __vcpu_run quite a bit), this patch puts most of the code from __vcpu_run into separate functions. The first function handles the code that runs before the SIE instruction and the other one handles the code that runs afterwards.

Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/s390/kvm/kvm-s390.c	54
1 file changed, 37 insertions(+), 17 deletions(-)
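
The refactoring pattern is easier to see in isolation. Below is a minimal, stand-alone C sketch of the pre-run / run / post-run split described in the commit message; the *_sketch names and the simulated exit value are illustrative only, and the real functions appear in the diff that follows.

#include <stdio.h>

/* Pre-run step: setup and tracing done before entering the guest. */
static int vcpu_pre_run_sketch(void)
{
	return 0;	/* nothing to prepare in this toy model */
}

/*
 * Post-run step: map the raw exit value to a return code.  The real
 * vcpu_post_run() additionally handles ucontrol guests and faults.
 */
static int vcpu_post_run_sketch(int exit_reason)
{
	return exit_reason >= 0 ? 0 : exit_reason;
}

/*
 * Shape of the slimmed-down run loop: pre-run, the entry itself
 * (simulated here), then post-run.
 */
static int run_guest_sketch(int simulated_exit_reason)
{
	int rc = vcpu_pre_run_sketch();

	if (rc)
		return rc;
	/*
	 * The real code calls sie64a() at this point, bracketed by
	 * kvm_guest_enter()/kvm_guest_exit().
	 */
	return vcpu_post_run_sketch(simulated_exit_reason);
}

int main(void)
{
	printf("normal exit -> %d\n", run_guest_sketch(0));
	printf("failed exit -> %d\n", run_guest_sketch(-1));
	return 0;
}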
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e3e7ff77ba44..69c7592e80d9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -689,9 +689,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
-	int rc;
+	int rc, cpuflags;
 
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
@@ -709,28 +709,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		return rc;
 
 	vcpu->arch.sie_block->icptcode = 0;
-	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
-		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	trace_kvm_s390_sie_enter(vcpu,
-				 atomic_read(&vcpu->arch.sie_block->cpuflags));
+	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
+	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
+	trace_kvm_s390_sie_enter(vcpu, cpuflags);
 
-	/*
-	 * As PF_VCPU will be used in fault handler, between guest_enter
-	 * and guest_exit should be no uaccess.
-	 */
-	preempt_disable();
-	kvm_guest_enter();
-	preempt_enable();
-	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
-	kvm_guest_exit();
+	return 0;
+}
+
+static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
+{
+	int rc;
 
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
 
-	if (rc > 0)
+	if (exit_reason >= 0) {
 		rc = 0;
-	if (rc < 0) {
+	} else {
 		if (kvm_is_ucontrol(vcpu->kvm)) {
 			rc = SIE_INTERCEPT_UCONTROL;
 		} else {
@@ -741,6 +737,30 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	}
 
 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+
+	return rc;
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu)
+{
+	int rc, exit_reason;
+
+	rc = vcpu_pre_run(vcpu);
+	if (rc)
+		return rc;
+
+	/*
+	 * As PF_VCPU will be used in fault handler, between guest_enter
+	 * and guest_exit should be no uaccess.
+	 */
+	preempt_disable();
+	kvm_guest_enter();
+	preempt_enable();
+	exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+	kvm_guest_exit();
+
+	rc = vcpu_post_run(vcpu, exit_reason);
+
 	return rc;
 }
 