 arch/s390/kvm/kvm-s390.c | 25 +++++++++++++++----------
 arch/s390/kvm/kvm-s390.h |  4 ++--
 arch/s390/kvm/sigp.c     | 15 ++++++++++++++-
 3 files changed, 31 insertions(+), 13 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7f4783525144..55eb8dec2a77 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -873,7 +873,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
  */
-int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	unsigned char archmode = 1;
 	int prefix;
@@ -891,15 +891,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	} else
 		prefix = 0;
 
-	/*
-	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
-	 * copying in vcpu load/put. Lets update our copies before we save
-	 * it into the save area
-	 */
-	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
-	save_access_regs(vcpu->run->s.regs.acrs);
-
 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
 			vcpu->arch.guest_fpregs.fprs, 128, prefix))
 		return -EFAULT;
@@ -944,6 +935,20 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return 0;
 }
 
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	/*
+	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
+	 * copying in vcpu load/put. Lets update our copies before we save
+	 * it into the save area
+	 */
+	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	save_access_regs(vcpu->run->s.regs.acrs);
+
+	return kvm_s390_store_status_unloaded(vcpu, addr);
+}
+
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 				     struct kvm_enable_cap *cap)
 {
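
The hunk above splits store-status handling into two entry points: kvm_s390_store_status_unloaded() writes the save area from the vcpu's in-memory register copies and can be used for a vcpu whose registers are not currently loaded, while kvm_s390_vcpu_store_status() keeps the old behaviour by first syncing the lazily held guest FPRs/ACRs from the host registers and then delegating. The standalone C sketch below models only that "sync, then delegate" pattern; every name and type in it is an illustrative stand-in, not a kernel API.

#include <stdio.h>
#include <string.h>

struct vcpu_model {
	double live_fprs[16];	/* stands in for registers held by the host */
	double saved_fprs[16];	/* stands in for vcpu->arch.guest_fpregs */
};

/* Writes the in-memory copies; safe even when the vcpu is not loaded. */
static int store_status_unloaded(struct vcpu_model *v, double *save_area)
{
	memcpy(save_area, v->saved_fprs, sizeof(v->saved_fprs));
	return 0;
}

/* Old entry point: refresh the copies from live state, then delegate. */
static int store_status(struct vcpu_model *v, double *save_area)
{
	memcpy(v->saved_fprs, v->live_fprs, sizeof(v->live_fprs));
	return store_status_unloaded(v, save_area);
}

int main(void)
{
	struct vcpu_model v = { .live_fprs = { 1.0, 2.0 } };
	double save_area[16];

	store_status(&v, save_area);
	printf("fpr0 in save area: %.1f\n", save_area[0]);
	return 0;
}

The point of the split is that callers which cannot assume the vcpu is loaded, such as the SIGP code changed below, can store status without touching the live host registers.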
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index fcd25b4bc7af..36f6b1890d4b 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -145,8 +145,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
-				unsigned long addr);
+int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void s390_vcpu_block(struct kvm_vcpu *vcpu);
 void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index bec398c57acf..6805601262e0 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -130,6 +130,7 @@ unlock:
 static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 {
 	struct kvm_s390_interrupt_info *inti;
+	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
 	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
 	if (!inti)
@@ -139,6 +140,8 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 	spin_lock_bh(&li->lock);
 	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		kfree(inti);
+		if ((action & ACTION_STORE_ON_STOP) != 0)
+			rc = -ESHUTDOWN;
 		goto out;
 	}
 	list_add_tail(&inti->list, &li->list);
@@ -150,7 +153,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 out:
 	spin_unlock_bh(&li->lock);
 
-	return SIGP_CC_ORDER_CODE_ACCEPTED;
+	return rc;
 }
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
@@ -174,6 +177,16 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 unlock:
 	spin_unlock(&fi->lock);
 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+
+	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
+		/* If the CPU has already been stopped, we still have
+		 * to save the status when doing stop-and-store. This
+		 * has to be done after unlocking all spinlocks. */
+		struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+		rc = kvm_s390_store_status_unloaded(dst_vcpu,
+						KVM_S390_STORE_STATUS_NOADDR);
+	}
+
 	return rc;
 }
 
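
In the sigp.c change, __inject_sigp_stop() runs under the local-interrupt spinlock, so when the target CPU is already stopped and the order includes ACTION_STORE_ON_STOP it only reports the pending store via -ESHUTDOWN; __sigp_stop() then performs the store through kvm_s390_store_status_unloaded() once all spinlocks are released. The standalone sketch below models just that deferred-work pattern under an assumed userspace lock; the pthread mutex and every name are illustrative stand-ins, not the kernel implementation.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define ACTION_STORE_ON_STOP	1

static pthread_mutex_t li_lock = PTHREAD_MUTEX_INITIALIZER;
static int cpu_already_stopped = 1;	/* pretend the target is stopped */

/* Runs under the lock; cannot do the store itself, so it defers it. */
static int inject_stop(int action)
{
	int rc = 0;

	pthread_mutex_lock(&li_lock);
	if (cpu_already_stopped && (action & ACTION_STORE_ON_STOP))
		rc = -ESHUTDOWN;	/* tell the caller a store is still due */
	pthread_mutex_unlock(&li_lock);
	return rc;
}

static int store_status_unloaded(void)
{
	printf("storing status with no locks held\n");
	return 0;
}

int main(void)
{
	int rc = inject_stop(ACTION_STORE_ON_STOP);

	if (rc == -ESHUTDOWN)
		rc = store_status_unloaded();	/* safe: lock already dropped */
	return rc;
}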