Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kvm_host.h |  5
-rw-r--r--  arch/s390/kvm/intercept.c        | 15
-rw-r--r--  arch/s390/kvm/interrupt.c        | 71
-rw-r--r--  arch/s390/kvm/kvm-s390.c         |  9
-rw-r--r--  arch/s390/kvm/kvm-s390.h         |  2
-rw-r--r--  arch/s390/kvm/sigp.c             | 64
-rw-r--r--  arch/s390/kvm/trace-s390.h       | 14
7 files changed, 88 insertions, 92 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 5eafe84a7b3d..02e42480609d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -383,10 +383,6 @@ struct kvm_s390_interrupt_info {
 	};
 };
 
-/* for local_interrupt.action_flags */
-#define ACTION_STORE_ON_STOP (1<<0)
-#define ACTION_STOP_ON_STOP (1<<1)
-
 struct kvm_s390_irq_payload {
 	struct kvm_s390_io_info io;
 	struct kvm_s390_ext_info ext;
@@ -403,7 +399,6 @@ struct kvm_s390_local_interrupt {
 	struct kvm_s390_float_interrupt *float_int;
 	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
-	unsigned int action_bits;
 	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
 	struct kvm_s390_irq_payload irq;
 	unsigned long pending_irqs;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 81c77ab8102e..08e01acc13c3 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -68,18 +68,23 @@ static int handle_noop(struct kvm_vcpu *vcpu)
 
 static int handle_stop(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc = 0;
-	unsigned int action_bits;
+	uint8_t flags, stop_pending;
 
 	vcpu->stat.exit_stop_request++;
-	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
 
-	action_bits = vcpu->arch.local_int.action_bits;
+	/* avoid races with the injection/SIGP STOP code */
+	spin_lock(&li->lock);
+	flags = li->irq.stop.flags;
+	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
+	spin_unlock(&li->lock);
 
-	if (!(action_bits & ACTION_STOP_ON_STOP))
+	trace_kvm_s390_stop_request(stop_pending, flags);
+	if (!stop_pending)
 		return 0;
 
-	if (action_bits & ACTION_STORE_ON_STOP) {
+	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
 		rc = kvm_s390_vcpu_store_status(vcpu,
 						KVM_S390_STORE_STATUS_NOADDR);
 		if (rc)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 73bafc3d0f41..18721886eb05 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -159,6 +159,12 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
 	if (psw_mchk_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_MCHK_MASK;
 
+	/*
+	 * STOP irqs will never be actively delivered. They are triggered via
+	 * intercept requests and cleared when the stop intercept is performed.
+	 */
+	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
+
 	return active_mask;
 }
 
@@ -186,9 +192,6 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 				   LCTL_CR10 | LCTL_CR11);
 		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
 	}
-
-	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
-		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -216,11 +219,18 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->lctl |= LCTL_CR14;
 }
 
+static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
+{
+	if (kvm_s390_is_stop_irq_pending(vcpu))
+		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+}
+
 /* Set interception request for non-deliverable local interrupts */
 static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
 {
 	set_intercept_indicators_ext(vcpu);
 	set_intercept_indicators_mchk(vcpu);
+	set_intercept_indicators_stop(vcpu);
 }
 
 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
@@ -392,25 +402,6 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
 	return rc ? -EFAULT : 0;
 }
 
-static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
-{
-	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_stop_info *stop = &li->irq.stop;
-
-	spin_lock(&li->lock);
-	stop->flags = 0;
-	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
-	spin_unlock(&li->lock);
-
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
-	vcpu->stat.deliver_stop_signal++;
-	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
-					 0, 0);
-
-	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
-	return 0;
-}
-
 static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -712,7 +703,6 @@ static const deliver_irq_t deliver_irq_funcs[] = {
 	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
 	[IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
 	[IRQ_PEND_RESTART] = __deliver_restart,
-	[IRQ_PEND_SIGP_STOP] = __deliver_stop,
 	[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
 	[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
 };
@@ -783,6 +773,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
 		rc = 1;
 
+	if (!rc && kvm_s390_is_stop_irq_pending(vcpu))
+		rc = 1;
+
 	return rc;
 }
 
@@ -1038,20 +1031,29 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	return 0;
 }
 
-#define KVM_S390_STOP_SUPP_FLAGS 0
+#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_stop_info *stop = &li->irq.stop;
+	int rc = 0;
 
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
 
 	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
 		return -EINVAL;
 
-	li->action_bits |= ACTION_STOP_ON_STOP;
+	if (is_vcpu_stopped(vcpu)) {
+		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
+			rc = kvm_s390_store_status_unloaded(vcpu,
+						KVM_S390_STORE_STATUS_NOADDR);
+		return rc;
+	}
+
+	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
+		return -EBUSY;
 	stop->flags = irq->u.stop.flags;
-	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
 	return 0;
 }
 
@@ -1339,6 +1341,23 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 	return 0;
 }
 
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+}
+
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	spin_lock(&li->lock);
+	li->irq.stop.flags = 0;
+	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+	spin_unlock(&li->lock);
+}
+
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 37ef06c19c31..b987b5674625 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1624,15 +1624,10 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
-	/* Need to lock access to action_bits to avoid a SIGP race condition */
-	spin_lock(&vcpu->arch.local_int.lock);
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
-
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
-	vcpu->arch.local_int.action_bits &=
-		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
-	spin_unlock(&vcpu->arch.local_int.lock);
+	kvm_s390_clear_stop_irq(vcpu);
 
+	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a8f3d9b71c11..d72ff624920e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -233,6 +233,8 @@ int psw_extint_disabled(struct kvm_vcpu *vcpu);
 void kvm_s390_destroy_adapters(struct kvm *kvm);
 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
 extern struct kvm_device_ops kvm_flic_ops;
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
 
 /* implemented in guestdbg.c */
 void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 6651f9f73973..a25185444c70 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -112,38 +112,19 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu,
 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
-{
-	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
-	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
-	spin_lock(&li->lock);
-	if (li->action_bits & ACTION_STOP_ON_STOP) {
-		/* another SIGP STOP is pending */
-		rc = SIGP_CC_BUSY;
-		goto out;
-	}
-	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
-		if ((action & ACTION_STORE_ON_STOP) != 0)
-			rc = -ESHUTDOWN;
-		goto out;
-	}
-	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
-	li->action_bits |= action;
-	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-	kvm_s390_vcpu_wakeup(dst_vcpu);
-out:
-	spin_unlock(&li->lock);
-
-	return rc;
-}
-
 static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 {
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_SIGP_STOP,
+	};
 	int rc;
 
-	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
-	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+	if (rc == -EBUSY)
+		rc = SIGP_CC_BUSY;
+	else if (rc == 0)
+		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
+			   dst_vcpu->vcpu_id);
 
 	return rc;
 }
@@ -151,20 +132,18 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
 					struct kvm_vcpu *dst_vcpu, u64 *reg)
 {
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_SIGP_STOP,
+		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
+	};
 	int rc;
 
-	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
-					  ACTION_STORE_ON_STOP);
-	VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
-		   dst_vcpu->vcpu_id);
-
-	if (rc == -ESHUTDOWN) {
-		/* If the CPU has already been stopped, we still have
-		 * to save the status when doing stop-and-store. This
-		 * has to be done after unlocking all spinlocks. */
-		rc = kvm_s390_store_status_unloaded(dst_vcpu,
-						KVM_S390_STORE_STATUS_NOADDR);
-	}
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+	if (rc == -EBUSY)
+		rc = SIGP_CC_BUSY;
+	else if (rc == 0)
+		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
+			   dst_vcpu->vcpu_id);
 
 	return rc;
 }
@@ -242,9 +221,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
 	int flags;
 	int rc;
 
-	spin_lock(&dst_vcpu->arch.local_int.lock);
 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-	spin_unlock(&dst_vcpu->arch.local_int.lock);
 	if (!(flags & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -291,8 +268,9 @@ static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
 	/* handle (RE)START in user space */
 	int rc = -EOPNOTSUPP;
 
+	/* make sure we don't race with STOP irq injection */
 	spin_lock(&li->lock);
-	if (li->action_bits & ACTION_STOP_ON_STOP)
+	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
 		rc = SIGP_CC_BUSY;
 	spin_unlock(&li->lock);
 
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 647e9d6a4818..653a7ec09ef5 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -209,19 +209,21 @@ TRACE_EVENT(kvm_s390_request_resets,
  * Trace point for a vcpu's stop requests.
  */
 TRACE_EVENT(kvm_s390_stop_request,
-	    TP_PROTO(unsigned int action_bits),
-	    TP_ARGS(action_bits),
+	    TP_PROTO(unsigned char stop_irq, unsigned char flags),
+	    TP_ARGS(stop_irq, flags),
 
 	    TP_STRUCT__entry(
-		    __field(unsigned int, action_bits)
+		    __field(unsigned char, stop_irq)
+		    __field(unsigned char, flags)
 		    ),
 
 	    TP_fast_assign(
-		    __entry->action_bits = action_bits;
+		    __entry->stop_irq = stop_irq;
+		    __entry->flags = flags;
 		    ),
 
-	    TP_printk("stop request, action_bits = %08x",
-		      __entry->action_bits)
+	    TP_printk("stop request, stop irq = %u, flags = %08x",
+		      __entry->stop_irq, __entry->flags)
 	);
 
 
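For reference, a minimal user-space sketch of how the reworked STOP path is driven from the other side, assuming a kernel that exposes the KVM_S390_IRQ vcpu ioctl together with the KVM_S390_STOP_FLAG_STORE_STATUS flag used above; the helper name inject_sigp_stop() and the vcpu_fd parameter are made up for illustration. The in-kernel SIGP handlers in sigp.c go through kvm_s390_inject_vcpu() with the same struct kvm_s390_irq payload, and a second STOP while one is still pending is reported as -EBUSY (translated to SIGP_CC_BUSY for the guest).

/*
 * Hypothetical user-space sketch (not part of this patch): inject a
 * SIGP STOP, optionally with store-status, into a vCPU through the
 * KVM_S390_IRQ ioctl. vcpu_fd is assumed to come from KVM_CREATE_VCPU.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_sigp_stop(int vcpu_fd, int store_status)
{
	struct kvm_s390_irq irq;

	memset(&irq, 0, sizeof(irq));
	irq.type = KVM_S390_SIGP_STOP;
	if (store_status)
		irq.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS;

	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0) {
		if (errno == EBUSY)	/* a STOP irq is already pending */
			fprintf(stderr, "STOP already pending, try again\n");
		return -errno;
	}
	return 0;
}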