diff options
author | Jens Freimann <jfrei@linux.vnet.ibm.com> | 2014-07-29 09:11:49 -0400 |
---|---|---|
committer | Christian Borntraeger <borntraeger@de.ibm.com> | 2014-11-28 07:59:04 -0500 |
commit | 383d0b050106abecb82f43101cac94fa423af5cd (patch) | |
tree | 0c774c70890bc3423147f366549a47d34a19dd6a | |
parent | c0e6159d519ec429ebf0d54025726cbe41b9e456 (diff) |
KVM: s390: handle pending local interrupts via bitmap
This patch adapts handling of local interrupts to be more compliant with
the z/Architecture Principles of Operation and introduces a data structure
which allows more efficient handling of interrupts.
* get rid of li->active flag, use bitmap instead
* Keep interrupts in a bitmap instead of a list
* Deliver interrupts in the order of their priority as defined in the
PoP
* Use a second bitmap for sigp emergency requests, as a CPU can have
one request pending from every other CPU in the system.
Signed-off-by: Jens Freimann <jfrei@linux.vnet.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r-- | arch/s390/include/asm/kvm_host.h | 2 | ||||
-rw-r--r-- | arch/s390/kvm/intercept.c | 4 | ||||
-rw-r--r-- | arch/s390/kvm/interrupt.c | 601 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 14 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.h | 5 | ||||
-rw-r--r-- | arch/s390/kvm/sigp.c | 36 |
6 files changed, 380 insertions, 282 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 624a821fcba0..9cba74d5d853 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -398,8 +398,6 @@ struct kvm_s390_irq_payload { | |||
398 | 398 | ||
399 | struct kvm_s390_local_interrupt { | 399 | struct kvm_s390_local_interrupt { |
400 | spinlock_t lock; | 400 | spinlock_t lock; |
401 | struct list_head list; | ||
402 | atomic_t active; | ||
403 | struct kvm_s390_float_interrupt *float_int; | 401 | struct kvm_s390_float_interrupt *float_int; |
404 | wait_queue_head_t *wq; | 402 | wait_queue_head_t *wq; |
405 | atomic_t *cpuflags; | 403 | atomic_t *cpuflags; |
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 1d244df5f8c4..81c77ab8102e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -257,7 +257,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) | |||
257 | static int handle_external_interrupt(struct kvm_vcpu *vcpu) | 257 | static int handle_external_interrupt(struct kvm_vcpu *vcpu) |
258 | { | 258 | { |
259 | u16 eic = vcpu->arch.sie_block->eic; | 259 | u16 eic = vcpu->arch.sie_block->eic; |
260 | struct kvm_s390_interrupt irq; | 260 | struct kvm_s390_irq irq; |
261 | psw_t newpsw; | 261 | psw_t newpsw; |
262 | int rc; | 262 | int rc; |
263 | 263 | ||
@@ -282,7 +282,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu) | |||
282 | if (kvm_s390_si_ext_call_pending(vcpu)) | 282 | if (kvm_s390_si_ext_call_pending(vcpu)) |
283 | return 0; | 283 | return 0; |
284 | irq.type = KVM_S390_INT_EXTERNAL_CALL; | 284 | irq.type = KVM_S390_INT_EXTERNAL_CALL; |
285 | irq.parm = vcpu->arch.sie_block->extcpuaddr; | 285 | irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr; |
286 | break; | 286 | break; |
287 | default: | 287 | default: |
288 | return -EOPNOTSUPP; | 288 | return -EOPNOTSUPP; |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 0d7f0a7be2fc..1aa7f2845ca8 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mmu_context.h> | 16 | #include <linux/mmu_context.h> |
17 | #include <linux/signal.h> | 17 | #include <linux/signal.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/bitmap.h> | ||
19 | #include <asm/asm-offsets.h> | 20 | #include <asm/asm-offsets.h> |
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | #include "kvm-s390.h" | 22 | #include "kvm-s390.h" |
@@ -136,6 +137,31 @@ static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu, | |||
136 | return 0; | 137 | return 0; |
137 | } | 138 | } |
138 | 139 | ||
140 | static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) | ||
141 | { | ||
142 | return vcpu->arch.local_int.pending_irqs; | ||
143 | } | ||
144 | |||
145 | static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) | ||
146 | { | ||
147 | unsigned long active_mask = pending_local_irqs(vcpu); | ||
148 | |||
149 | if (psw_extint_disabled(vcpu)) | ||
150 | active_mask &= ~IRQ_PEND_EXT_MASK; | ||
151 | if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) | ||
152 | __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); | ||
153 | if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) | ||
154 | __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask); | ||
155 | if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) | ||
156 | __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); | ||
157 | if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) | ||
158 | __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); | ||
159 | if (psw_mchk_disabled(vcpu)) | ||
160 | active_mask &= ~IRQ_PEND_MCHK_MASK; | ||
161 | |||
162 | return active_mask; | ||
163 | } | ||
164 | |||
139 | static void __set_cpu_idle(struct kvm_vcpu *vcpu) | 165 | static void __set_cpu_idle(struct kvm_vcpu *vcpu) |
140 | { | 166 | { |
141 | atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); | 167 | atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); |
@@ -170,26 +196,45 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) | |||
170 | atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); | 196 | atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); |
171 | } | 197 | } |
172 | 198 | ||
199 | static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) | ||
200 | { | ||
201 | if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) | ||
202 | return; | ||
203 | if (psw_extint_disabled(vcpu)) | ||
204 | __set_cpuflag(vcpu, CPUSTAT_EXT_INT); | ||
205 | else | ||
206 | vcpu->arch.sie_block->lctl |= LCTL_CR0; | ||
207 | } | ||
208 | |||
209 | static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) | ||
210 | { | ||
211 | if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) | ||
212 | return; | ||
213 | if (psw_mchk_disabled(vcpu)) | ||
214 | vcpu->arch.sie_block->ictl |= ICTL_LPSW; | ||
215 | else | ||
216 | vcpu->arch.sie_block->lctl |= LCTL_CR14; | ||
217 | } | ||
218 | |||
219 | /* Set interception request for non-deliverable local interrupts */ | ||
220 | static void set_intercept_indicators_local(struct kvm_vcpu *vcpu) | ||
221 | { | ||
222 | set_intercept_indicators_ext(vcpu); | ||
223 | set_intercept_indicators_mchk(vcpu); | ||
224 | } | ||
225 | |||
173 | static void __set_intercept_indicator(struct kvm_vcpu *vcpu, | 226 | static void __set_intercept_indicator(struct kvm_vcpu *vcpu, |
174 | struct kvm_s390_interrupt_info *inti) | 227 | struct kvm_s390_interrupt_info *inti) |
175 | { | 228 | { |
176 | switch (inti->type) { | 229 | switch (inti->type) { |
177 | case KVM_S390_INT_EXTERNAL_CALL: | ||
178 | case KVM_S390_INT_EMERGENCY: | ||
179 | case KVM_S390_INT_SERVICE: | 230 | case KVM_S390_INT_SERVICE: |
180 | case KVM_S390_INT_PFAULT_INIT: | ||
181 | case KVM_S390_INT_PFAULT_DONE: | 231 | case KVM_S390_INT_PFAULT_DONE: |
182 | case KVM_S390_INT_VIRTIO: | 232 | case KVM_S390_INT_VIRTIO: |
183 | case KVM_S390_INT_CLOCK_COMP: | ||
184 | case KVM_S390_INT_CPU_TIMER: | ||
185 | if (psw_extint_disabled(vcpu)) | 233 | if (psw_extint_disabled(vcpu)) |
186 | __set_cpuflag(vcpu, CPUSTAT_EXT_INT); | 234 | __set_cpuflag(vcpu, CPUSTAT_EXT_INT); |
187 | else | 235 | else |
188 | vcpu->arch.sie_block->lctl |= LCTL_CR0; | 236 | vcpu->arch.sie_block->lctl |= LCTL_CR0; |
189 | break; | 237 | break; |
190 | case KVM_S390_SIGP_STOP: | ||
191 | __set_cpuflag(vcpu, CPUSTAT_STOP_INT); | ||
192 | break; | ||
193 | case KVM_S390_MCHK: | 238 | case KVM_S390_MCHK: |
194 | if (psw_mchk_disabled(vcpu)) | 239 | if (psw_mchk_disabled(vcpu)) |
195 | vcpu->arch.sie_block->ictl |= ICTL_LPSW; | 240 | vcpu->arch.sie_block->ictl |= ICTL_LPSW; |
@@ -228,6 +273,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu) | |||
228 | 273 | ||
229 | static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) | 274 | static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) |
230 | { | 275 | { |
276 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
231 | int rc; | 277 | int rc; |
232 | 278 | ||
233 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, | 279 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, |
@@ -239,11 +285,13 @@ static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) | |||
239 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 285 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
240 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 286 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
241 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 287 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
288 | clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); | ||
242 | return rc; | 289 | return rc; |
243 | } | 290 | } |
244 | 291 | ||
245 | static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) | 292 | static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) |
246 | { | 293 | { |
294 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
247 | int rc; | 295 | int rc; |
248 | 296 | ||
249 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, | 297 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, |
@@ -255,20 +303,27 @@ static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) | |||
255 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 303 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
256 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 304 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
257 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 305 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
306 | clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | ||
258 | return rc; | 307 | return rc; |
259 | } | 308 | } |
260 | 309 | ||
261 | static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu, | 310 | static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) |
262 | struct kvm_s390_interrupt_info *inti) | ||
263 | { | 311 | { |
264 | struct kvm_s390_ext_info *ext = &inti->ext; | 312 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
313 | struct kvm_s390_ext_info ext; | ||
265 | int rc; | 314 | int rc; |
266 | 315 | ||
316 | spin_lock(&li->lock); | ||
317 | ext = li->irq.ext; | ||
318 | clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); | ||
319 | li->irq.ext.ext_params2 = 0; | ||
320 | spin_unlock(&li->lock); | ||
321 | |||
267 | VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx", | 322 | VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx", |
268 | 0, ext->ext_params2); | 323 | 0, ext.ext_params2); |
269 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 324 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
270 | KVM_S390_INT_PFAULT_INIT, | 325 | KVM_S390_INT_PFAULT_INIT, |
271 | 0, ext->ext_params2); | 326 | 0, ext.ext_params2); |
272 | 327 | ||
273 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE); | 328 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE); |
274 | rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); | 329 | rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); |
@@ -276,28 +331,40 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu, | |||
276 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 331 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
277 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 332 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
278 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 333 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
279 | rc |= put_guest_lc(vcpu, ext->ext_params2, (u64 *) __LC_EXT_PARAMS2); | 334 | rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2); |
280 | return rc; | 335 | return rc; |
281 | } | 336 | } |
282 | 337 | ||
283 | static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu, | 338 | static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) |
284 | struct kvm_s390_interrupt_info *inti) | ||
285 | { | 339 | { |
286 | struct kvm_s390_mchk_info *mchk = &inti->mchk; | 340 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
341 | struct kvm_s390_mchk_info mchk; | ||
287 | int rc; | 342 | int rc; |
288 | 343 | ||
344 | spin_lock(&li->lock); | ||
345 | mchk = li->irq.mchk; | ||
346 | /* | ||
347 | * If there was an exigent machine check pending, then any repressible | ||
348 | * machine checks that might have been pending are indicated along | ||
349 | * with it, so always clear both bits | ||
350 | */ | ||
351 | clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); | ||
352 | clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); | ||
353 | memset(&li->irq.mchk, 0, sizeof(mchk)); | ||
354 | spin_unlock(&li->lock); | ||
355 | |||
289 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", | 356 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", |
290 | mchk->mcic); | 357 | mchk.mcic); |
291 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, | 358 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, |
292 | mchk->cr14, mchk->mcic); | 359 | mchk.cr14, mchk.mcic); |
293 | 360 | ||
294 | rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); | 361 | rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); |
295 | rc |= put_guest_lc(vcpu, mchk->mcic, | 362 | rc |= put_guest_lc(vcpu, mchk.mcic, |
296 | (u64 __user *) __LC_MCCK_CODE); | 363 | (u64 __user *) __LC_MCCK_CODE); |
297 | rc |= put_guest_lc(vcpu, mchk->failing_storage_address, | 364 | rc |= put_guest_lc(vcpu, mchk.failing_storage_address, |
298 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); | 365 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); |
299 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, | 366 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, |
300 | &mchk->fixed_logout, sizeof(mchk->fixed_logout)); | 367 | &mchk.fixed_logout, sizeof(mchk.fixed_logout)); |
301 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | 368 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, |
302 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 369 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
303 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | 370 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, |
@@ -307,6 +374,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu, | |||
307 | 374 | ||
308 | static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) | 375 | static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) |
309 | { | 376 | { |
377 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
310 | int rc; | 378 | int rc; |
311 | 379 | ||
312 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); | 380 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); |
@@ -318,6 +386,7 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) | |||
318 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 386 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
319 | rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), | 387 | rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), |
320 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 388 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
389 | clear_bit(IRQ_PEND_RESTART, &li->pending_irqs); | ||
321 | return rc; | 390 | return rc; |
322 | } | 391 | } |
323 | 392 | ||
@@ -329,38 +398,52 @@ static int __must_check __deliver_stop(struct kvm_vcpu *vcpu) | |||
329 | 0, 0); | 398 | 0, 0); |
330 | 399 | ||
331 | __set_cpuflag(vcpu, CPUSTAT_STOP_INT); | 400 | __set_cpuflag(vcpu, CPUSTAT_STOP_INT); |
401 | clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs); | ||
332 | return 0; | 402 | return 0; |
333 | } | 403 | } |
334 | 404 | ||
335 | static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu, | 405 | static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu) |
336 | struct kvm_s390_interrupt_info *inti) | ||
337 | { | 406 | { |
338 | struct kvm_s390_prefix_info *prefix = &inti->prefix; | 407 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
408 | struct kvm_s390_prefix_info prefix; | ||
409 | |||
410 | spin_lock(&li->lock); | ||
411 | prefix = li->irq.prefix; | ||
412 | li->irq.prefix.address = 0; | ||
413 | clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); | ||
414 | spin_unlock(&li->lock); | ||
339 | 415 | ||
340 | VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix->address); | 416 | VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address); |
341 | vcpu->stat.deliver_prefix_signal++; | 417 | vcpu->stat.deliver_prefix_signal++; |
342 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 418 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
343 | KVM_S390_SIGP_SET_PREFIX, | 419 | KVM_S390_SIGP_SET_PREFIX, |
344 | prefix->address, 0); | 420 | prefix.address, 0); |
345 | 421 | ||
346 | kvm_s390_set_prefix(vcpu, prefix->address); | 422 | kvm_s390_set_prefix(vcpu, prefix.address); |
347 | return 0; | 423 | return 0; |
348 | } | 424 | } |
349 | 425 | ||
350 | static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu, | 426 | static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu) |
351 | struct kvm_s390_interrupt_info *inti) | ||
352 | { | 427 | { |
353 | struct kvm_s390_emerg_info *emerg = &inti->emerg; | 428 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
354 | int rc; | 429 | int rc; |
430 | int cpu_addr; | ||
431 | |||
432 | spin_lock(&li->lock); | ||
433 | cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS); | ||
434 | clear_bit(cpu_addr, li->sigp_emerg_pending); | ||
435 | if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS)) | ||
436 | clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); | ||
437 | spin_unlock(&li->lock); | ||
355 | 438 | ||
356 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); | 439 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); |
357 | vcpu->stat.deliver_emergency_signal++; | 440 | vcpu->stat.deliver_emergency_signal++; |
358 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 441 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, |
359 | inti->emerg.code, 0); | 442 | cpu_addr, 0); |
360 | 443 | ||
361 | rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, | 444 | rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, |
362 | (u16 *)__LC_EXT_INT_CODE); | 445 | (u16 *)__LC_EXT_INT_CODE); |
363 | rc |= put_guest_lc(vcpu, emerg->code, (u16 *)__LC_EXT_CPU_ADDR); | 446 | rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR); |
364 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | 447 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
365 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 448 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
366 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 449 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
@@ -368,21 +451,27 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu, | |||
368 | return rc; | 451 | return rc; |
369 | } | 452 | } |
370 | 453 | ||
371 | static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu, | 454 | static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu) |
372 | struct kvm_s390_interrupt_info *inti) | ||
373 | { | 455 | { |
374 | struct kvm_s390_extcall_info *extcall = &inti->extcall; | 456 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
457 | struct kvm_s390_extcall_info extcall; | ||
375 | int rc; | 458 | int rc; |
376 | 459 | ||
460 | spin_lock(&li->lock); | ||
461 | extcall = li->irq.extcall; | ||
462 | li->irq.extcall.code = 0; | ||
463 | clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); | ||
464 | spin_unlock(&li->lock); | ||
465 | |||
377 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); | 466 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); |
378 | vcpu->stat.deliver_external_call++; | 467 | vcpu->stat.deliver_external_call++; |
379 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 468 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
380 | KVM_S390_INT_EXTERNAL_CALL, | 469 | KVM_S390_INT_EXTERNAL_CALL, |
381 | extcall->code, 0); | 470 | extcall.code, 0); |
382 | 471 | ||
383 | rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, | 472 | rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, |
384 | (u16 *)__LC_EXT_INT_CODE); | 473 | (u16 *)__LC_EXT_INT_CODE); |
385 | rc |= put_guest_lc(vcpu, extcall->code, (u16 *)__LC_EXT_CPU_ADDR); | 474 | rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR); |
386 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | 475 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
387 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 476 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
388 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, | 477 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, |
@@ -390,20 +479,26 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu, | |||
390 | return rc; | 479 | return rc; |
391 | } | 480 | } |
392 | 481 | ||
393 | static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, | 482 | static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) |
394 | struct kvm_s390_interrupt_info *inti) | ||
395 | { | 483 | { |
396 | struct kvm_s390_pgm_info *pgm_info = &inti->pgm; | 484 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
485 | struct kvm_s390_pgm_info pgm_info; | ||
397 | int rc = 0; | 486 | int rc = 0; |
398 | u16 ilc = get_ilc(vcpu); | 487 | u16 ilc = get_ilc(vcpu); |
399 | 488 | ||
489 | spin_lock(&li->lock); | ||
490 | pgm_info = li->irq.pgm; | ||
491 | clear_bit(IRQ_PEND_PROG, &li->pending_irqs); | ||
492 | memset(&li->irq.pgm, 0, sizeof(pgm_info)); | ||
493 | spin_unlock(&li->lock); | ||
494 | |||
400 | VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", | 495 | VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", |
401 | pgm_info->code, ilc); | 496 | pgm_info.code, ilc); |
402 | vcpu->stat.deliver_program_int++; | 497 | vcpu->stat.deliver_program_int++; |
403 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, | 498 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, |
404 | pgm_info->code, 0); | 499 | pgm_info.code, 0); |
405 | 500 | ||
406 | switch (pgm_info->code & ~PGM_PER) { | 501 | switch (pgm_info.code & ~PGM_PER) { |
407 | case PGM_AFX_TRANSLATION: | 502 | case PGM_AFX_TRANSLATION: |
408 | case PGM_ASX_TRANSLATION: | 503 | case PGM_ASX_TRANSLATION: |
409 | case PGM_EX_TRANSLATION: | 504 | case PGM_EX_TRANSLATION: |
@@ -414,7 +509,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, | |||
414 | case PGM_PRIMARY_AUTHORITY: | 509 | case PGM_PRIMARY_AUTHORITY: |
415 | case PGM_SECONDARY_AUTHORITY: | 510 | case PGM_SECONDARY_AUTHORITY: |
416 | case PGM_SPACE_SWITCH: | 511 | case PGM_SPACE_SWITCH: |
417 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | 512 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, |
418 | (u64 *)__LC_TRANS_EXC_CODE); | 513 | (u64 *)__LC_TRANS_EXC_CODE); |
419 | break; | 514 | break; |
420 | case PGM_ALEN_TRANSLATION: | 515 | case PGM_ALEN_TRANSLATION: |
@@ -423,7 +518,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, | |||
423 | case PGM_ASTE_SEQUENCE: | 518 | case PGM_ASTE_SEQUENCE: |
424 | case PGM_ASTE_VALIDITY: | 519 | case PGM_ASTE_VALIDITY: |
425 | case PGM_EXTENDED_AUTHORITY: | 520 | case PGM_EXTENDED_AUTHORITY: |
426 | rc = put_guest_lc(vcpu, pgm_info->exc_access_id, | 521 | rc = put_guest_lc(vcpu, pgm_info.exc_access_id, |
427 | (u8 *)__LC_EXC_ACCESS_ID); | 522 | (u8 *)__LC_EXC_ACCESS_ID); |
428 | break; | 523 | break; |
429 | case PGM_ASCE_TYPE: | 524 | case PGM_ASCE_TYPE: |
@@ -432,44 +527,44 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, | |||
432 | case PGM_REGION_SECOND_TRANS: | 527 | case PGM_REGION_SECOND_TRANS: |
433 | case PGM_REGION_THIRD_TRANS: | 528 | case PGM_REGION_THIRD_TRANS: |
434 | case PGM_SEGMENT_TRANSLATION: | 529 | case PGM_SEGMENT_TRANSLATION: |
435 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | 530 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, |
436 | (u64 *)__LC_TRANS_EXC_CODE); | 531 | (u64 *)__LC_TRANS_EXC_CODE); |
437 | rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, | 532 | rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, |
438 | (u8 *)__LC_EXC_ACCESS_ID); | 533 | (u8 *)__LC_EXC_ACCESS_ID); |
439 | rc |= put_guest_lc(vcpu, pgm_info->op_access_id, | 534 | rc |= put_guest_lc(vcpu, pgm_info.op_access_id, |
440 | (u8 *)__LC_OP_ACCESS_ID); | 535 | (u8 *)__LC_OP_ACCESS_ID); |
441 | break; | 536 | break; |
442 | case PGM_MONITOR: | 537 | case PGM_MONITOR: |
443 | rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, | 538 | rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, |
444 | (u16 *)__LC_MON_CLASS_NR); | 539 | (u16 *)__LC_MON_CLASS_NR); |
445 | rc |= put_guest_lc(vcpu, pgm_info->mon_code, | 540 | rc |= put_guest_lc(vcpu, pgm_info.mon_code, |
446 | (u64 *)__LC_MON_CODE); | 541 | (u64 *)__LC_MON_CODE); |
447 | break; | 542 | break; |
448 | case PGM_DATA: | 543 | case PGM_DATA: |
449 | rc = put_guest_lc(vcpu, pgm_info->data_exc_code, | 544 | rc = put_guest_lc(vcpu, pgm_info.data_exc_code, |
450 | (u32 *)__LC_DATA_EXC_CODE); | 545 | (u32 *)__LC_DATA_EXC_CODE); |
451 | break; | 546 | break; |
452 | case PGM_PROTECTION: | 547 | case PGM_PROTECTION: |
453 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | 548 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, |
454 | (u64 *)__LC_TRANS_EXC_CODE); | 549 | (u64 *)__LC_TRANS_EXC_CODE); |
455 | rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, | 550 | rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, |
456 | (u8 *)__LC_EXC_ACCESS_ID); | 551 | (u8 *)__LC_EXC_ACCESS_ID); |
457 | break; | 552 | break; |
458 | } | 553 | } |
459 | 554 | ||
460 | if (pgm_info->code & PGM_PER) { | 555 | if (pgm_info.code & PGM_PER) { |
461 | rc |= put_guest_lc(vcpu, pgm_info->per_code, | 556 | rc |= put_guest_lc(vcpu, pgm_info.per_code, |
462 | (u8 *) __LC_PER_CODE); | 557 | (u8 *) __LC_PER_CODE); |
463 | rc |= put_guest_lc(vcpu, pgm_info->per_atmid, | 558 | rc |= put_guest_lc(vcpu, pgm_info.per_atmid, |
464 | (u8 *)__LC_PER_ATMID); | 559 | (u8 *)__LC_PER_ATMID); |
465 | rc |= put_guest_lc(vcpu, pgm_info->per_address, | 560 | rc |= put_guest_lc(vcpu, pgm_info.per_address, |
466 | (u64 *) __LC_PER_ADDRESS); | 561 | (u64 *) __LC_PER_ADDRESS); |
467 | rc |= put_guest_lc(vcpu, pgm_info->per_access_id, | 562 | rc |= put_guest_lc(vcpu, pgm_info.per_access_id, |
468 | (u8 *) __LC_PER_ACCESS_ID); | 563 | (u8 *) __LC_PER_ACCESS_ID); |
469 | } | 564 | } |
470 | 565 | ||
471 | rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); | 566 | rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); |
472 | rc |= put_guest_lc(vcpu, pgm_info->code, | 567 | rc |= put_guest_lc(vcpu, pgm_info.code, |
473 | (u16 *)__LC_PGM_INT_CODE); | 568 | (u16 *)__LC_PGM_INT_CODE); |
474 | rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, | 569 | rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, |
475 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 570 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
@@ -572,50 +667,63 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu, | |||
572 | return rc; | 667 | return rc; |
573 | } | 668 | } |
574 | 669 | ||
575 | static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, | 670 | static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu, |
576 | struct kvm_s390_interrupt_info *inti) | 671 | struct kvm_s390_interrupt_info *inti) |
672 | { | ||
673 | struct kvm_s390_mchk_info *mchk = &inti->mchk; | ||
674 | int rc; | ||
675 | |||
676 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", | ||
677 | mchk->mcic); | ||
678 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, | ||
679 | mchk->cr14, mchk->mcic); | ||
680 | |||
681 | rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); | ||
682 | rc |= put_guest_lc(vcpu, mchk->mcic, | ||
683 | (u64 __user *) __LC_MCCK_CODE); | ||
684 | rc |= put_guest_lc(vcpu, mchk->failing_storage_address, | ||
685 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); | ||
686 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, | ||
687 | &mchk->fixed_logout, sizeof(mchk->fixed_logout)); | ||
688 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | ||
689 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
690 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | ||
691 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
692 | return rc; | ||
693 | } | ||
694 | |||
695 | typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); | ||
696 | |||
697 | static const deliver_irq_t deliver_irq_funcs[] = { | ||
698 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, | ||
699 | [IRQ_PEND_PROG] = __deliver_prog, | ||
700 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, | ||
701 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, | ||
702 | [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc, | ||
703 | [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer, | ||
704 | [IRQ_PEND_RESTART] = __deliver_restart, | ||
705 | [IRQ_PEND_SIGP_STOP] = __deliver_stop, | ||
706 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, | ||
707 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, | ||
708 | }; | ||
709 | |||
710 | static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu, | ||
711 | struct kvm_s390_interrupt_info *inti) | ||
577 | { | 712 | { |
578 | int rc; | 713 | int rc; |
579 | 714 | ||
580 | switch (inti->type) { | 715 | switch (inti->type) { |
581 | case KVM_S390_INT_EMERGENCY: | ||
582 | rc = __deliver_emergency_signal(vcpu, inti); | ||
583 | break; | ||
584 | case KVM_S390_INT_EXTERNAL_CALL: | ||
585 | rc = __deliver_external_call(vcpu, inti); | ||
586 | break; | ||
587 | case KVM_S390_INT_CLOCK_COMP: | ||
588 | rc = __deliver_ckc(vcpu); | ||
589 | break; | ||
590 | case KVM_S390_INT_CPU_TIMER: | ||
591 | rc = __deliver_cpu_timer(vcpu); | ||
592 | break; | ||
593 | case KVM_S390_INT_SERVICE: | 716 | case KVM_S390_INT_SERVICE: |
594 | rc = __deliver_service(vcpu, inti); | 717 | rc = __deliver_service(vcpu, inti); |
595 | break; | 718 | break; |
596 | case KVM_S390_INT_PFAULT_INIT: | ||
597 | rc = __deliver_pfault_init(vcpu, inti); | ||
598 | break; | ||
599 | case KVM_S390_INT_PFAULT_DONE: | 719 | case KVM_S390_INT_PFAULT_DONE: |
600 | rc = __deliver_pfault_done(vcpu, inti); | 720 | rc = __deliver_pfault_done(vcpu, inti); |
601 | break; | 721 | break; |
602 | case KVM_S390_INT_VIRTIO: | 722 | case KVM_S390_INT_VIRTIO: |
603 | rc = __deliver_virtio(vcpu, inti); | 723 | rc = __deliver_virtio(vcpu, inti); |
604 | break; | 724 | break; |
605 | case KVM_S390_SIGP_STOP: | ||
606 | rc = __deliver_stop(vcpu); | ||
607 | break; | ||
608 | case KVM_S390_SIGP_SET_PREFIX: | ||
609 | rc = __deliver_set_prefix(vcpu, inti); | ||
610 | break; | ||
611 | case KVM_S390_RESTART: | ||
612 | rc = __deliver_restart(vcpu); | ||
613 | break; | ||
614 | case KVM_S390_PROGRAM_INT: | ||
615 | rc = __deliver_prog(vcpu, inti); | ||
616 | break; | ||
617 | case KVM_S390_MCHK: | 725 | case KVM_S390_MCHK: |
618 | rc = __deliver_machine_check(vcpu, inti); | 726 | rc = __deliver_mchk_floating(vcpu, inti); |
619 | break; | 727 | break; |
620 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 728 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
621 | rc = __deliver_io(vcpu, inti); | 729 | rc = __deliver_io(vcpu, inti); |
@@ -643,20 +751,11 @@ int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu) | |||
643 | 751 | ||
644 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | 752 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) |
645 | { | 753 | { |
646 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
647 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; | 754 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; |
648 | struct kvm_s390_interrupt_info *inti; | 755 | struct kvm_s390_interrupt_info *inti; |
649 | int rc = 0; | 756 | int rc; |
650 | 757 | ||
651 | if (atomic_read(&li->active)) { | 758 | rc = !!deliverable_local_irqs(vcpu); |
652 | spin_lock(&li->lock); | ||
653 | list_for_each_entry(inti, &li->list, list) | ||
654 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
655 | rc = 1; | ||
656 | break; | ||
657 | } | ||
658 | spin_unlock(&li->lock); | ||
659 | } | ||
660 | 759 | ||
661 | if ((!rc) && atomic_read(&fi->active)) { | 760 | if ((!rc) && atomic_read(&fi->active)) { |
662 | spin_lock(&fi->lock); | 761 | spin_lock(&fi->lock); |
@@ -748,18 +847,15 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) | |||
748 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | 847 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) |
749 | { | 848 | { |
750 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 849 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
751 | struct kvm_s390_interrupt_info *n, *inti = NULL; | ||
752 | 850 | ||
753 | spin_lock(&li->lock); | 851 | spin_lock(&li->lock); |
754 | list_for_each_entry_safe(inti, n, &li->list, list) { | 852 | li->pending_irqs = 0; |
755 | list_del(&inti->list); | 853 | bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS); |
756 | kfree(inti); | 854 | memset(&li->irq, 0, sizeof(li->irq)); |
757 | } | ||
758 | atomic_set(&li->active, 0); | ||
759 | spin_unlock(&li->lock); | 855 | spin_unlock(&li->lock); |
760 | 856 | ||
761 | /* clear pending external calls set by sigp interpretation facility */ | 857 | /* clear pending external calls set by sigp interpretation facility */ |
762 | atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); | 858 | atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); |
763 | atomic_clear_mask(SIGP_CTRL_C, | 859 | atomic_clear_mask(SIGP_CTRL_C, |
764 | &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl); | 860 | &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl); |
765 | } | 861 | } |
@@ -769,34 +865,35 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
769 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 865 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
770 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; | 866 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; |
771 | struct kvm_s390_interrupt_info *n, *inti = NULL; | 867 | struct kvm_s390_interrupt_info *n, *inti = NULL; |
868 | deliver_irq_t func; | ||
772 | int deliver; | 869 | int deliver; |
773 | int rc = 0; | 870 | int rc = 0; |
871 | unsigned long irq_type; | ||
872 | unsigned long deliverable_irqs; | ||
774 | 873 | ||
775 | __reset_intercept_indicators(vcpu); | 874 | __reset_intercept_indicators(vcpu); |
776 | if (atomic_read(&li->active)) { | ||
777 | do { | ||
778 | deliver = 0; | ||
779 | spin_lock(&li->lock); | ||
780 | list_for_each_entry_safe(inti, n, &li->list, list) { | ||
781 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
782 | list_del(&inti->list); | ||
783 | deliver = 1; | ||
784 | break; | ||
785 | } | ||
786 | __set_intercept_indicator(vcpu, inti); | ||
787 | } | ||
788 | if (list_empty(&li->list)) | ||
789 | atomic_set(&li->active, 0); | ||
790 | spin_unlock(&li->lock); | ||
791 | if (deliver) { | ||
792 | rc = __do_deliver_interrupt(vcpu, inti); | ||
793 | kfree(inti); | ||
794 | } | ||
795 | } while (!rc && deliver); | ||
796 | } | ||
797 | 875 | ||
798 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) | 876 | /* pending ckc conditions might have been invalidated */ |
799 | rc = __deliver_ckc(vcpu); | 877 | clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
878 | if (kvm_cpu_has_pending_timer(vcpu)) | ||
879 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | ||
880 | |||
881 | do { | ||
882 | deliverable_irqs = deliverable_local_irqs(vcpu); | ||
883 | /* bits are in the order of interrupt priority */ | ||
884 | irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT); | ||
885 | if (irq_type == IRQ_PEND_COUNT) | ||
886 | break; | ||
887 | func = deliver_irq_funcs[irq_type]; | ||
888 | if (!func) { | ||
889 | WARN_ON_ONCE(func == NULL); | ||
890 | clear_bit(irq_type, &li->pending_irqs); | ||
891 | continue; | ||
892 | } | ||
893 | rc = func(vcpu); | ||
894 | } while (!rc && irq_type != IRQ_PEND_COUNT); | ||
895 | |||
896 | set_intercept_indicators_local(vcpu); | ||
800 | 897 | ||
801 | if (!rc && atomic_read(&fi->active)) { | 898 | if (!rc && atomic_read(&fi->active)) { |
802 | do { | 899 | do { |
@@ -815,7 +912,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
815 | atomic_set(&fi->active, 0); | 912 | atomic_set(&fi->active, 0); |
816 | spin_unlock(&fi->lock); | 913 | spin_unlock(&fi->lock); |
817 | if (deliver) { | 914 | if (deliver) { |
818 | rc = __do_deliver_interrupt(vcpu, inti); | 915 | rc = __deliver_floating_interrupt(vcpu, inti); |
819 | kfree(inti); | 916 | kfree(inti); |
820 | } | 917 | } |
821 | } while (!rc && deliver); | 918 | } while (!rc && deliver); |
@@ -824,33 +921,26 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
824 | return rc; | 921 | return rc; |
825 | } | 922 | } |
826 | 923 | ||
827 | static int __inject_prog_irq(struct kvm_vcpu *vcpu, | 924 | static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) |
828 | struct kvm_s390_interrupt_info *inti) | ||
829 | { | 925 | { |
830 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 926 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
831 | 927 | ||
832 | list_add(&inti->list, &li->list); | 928 | li->irq.pgm = irq->u.pgm; |
833 | atomic_set(&li->active, 1); | 929 | __set_bit(IRQ_PEND_PROG, &li->pending_irqs); |
834 | return 0; | 930 | return 0; |
835 | } | 931 | } |
836 | 932 | ||
837 | int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) | 933 | int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) |
838 | { | 934 | { |
839 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 935 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
840 | struct kvm_s390_interrupt_info *inti; | 936 | struct kvm_s390_irq irq; |
841 | |||
842 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
843 | if (!inti) | ||
844 | return -ENOMEM; | ||
845 | |||
846 | inti->type = KVM_S390_PROGRAM_INT; | ||
847 | inti->pgm.code = code; | ||
848 | 937 | ||
849 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); | 938 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); |
850 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); | 939 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code, |
940 | 0, 1); | ||
851 | spin_lock(&li->lock); | 941 | spin_lock(&li->lock); |
852 | list_add(&inti->list, &li->list); | 942 | irq.u.pgm.code = code; |
853 | atomic_set(&li->active, 1); | 943 | __inject_prog(vcpu, &irq); |
854 | BUG_ON(waitqueue_active(li->wq)); | 944 | BUG_ON(waitqueue_active(li->wq)); |
855 | spin_unlock(&li->lock); | 945 | spin_unlock(&li->lock); |
856 | return 0; | 946 | return 0; |
@@ -860,151 +950,158 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, | |||
860 | struct kvm_s390_pgm_info *pgm_info) | 950 | struct kvm_s390_pgm_info *pgm_info) |
861 | { | 951 | { |
862 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 952 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
863 | struct kvm_s390_interrupt_info *inti; | 953 | struct kvm_s390_irq irq; |
864 | int rc; | 954 | int rc; |
865 | 955 | ||
866 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
867 | if (!inti) | ||
868 | return -ENOMEM; | ||
869 | |||
870 | VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", | 956 | VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", |
871 | pgm_info->code); | 957 | pgm_info->code); |
872 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, | 958 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, |
873 | pgm_info->code, 0, 1); | 959 | pgm_info->code, 0, 1); |
874 | |||
875 | inti->type = KVM_S390_PROGRAM_INT; | ||
876 | memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); | ||
877 | spin_lock(&li->lock); | 960 | spin_lock(&li->lock); |
878 | rc = __inject_prog_irq(vcpu, inti); | 961 | irq.u.pgm = *pgm_info; |
962 | rc = __inject_prog(vcpu, &irq); | ||
879 | BUG_ON(waitqueue_active(li->wq)); | 963 | BUG_ON(waitqueue_active(li->wq)); |
880 | spin_unlock(&li->lock); | 964 | spin_unlock(&li->lock); |
881 | return rc; | 965 | return rc; |
882 | } | 966 | } |
883 | 967 | ||
884 | static int __inject_pfault_init(struct kvm_vcpu *vcpu, | 968 | static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) |
885 | struct kvm_s390_interrupt *s390int, | ||
886 | struct kvm_s390_interrupt_info *inti) | ||
887 | { | 969 | { |
888 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 970 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
889 | 971 | ||
890 | inti->ext.ext_params2 = s390int->parm64; | 972 | VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx", |
891 | list_add_tail(&inti->list, &li->list); | 973 | irq->u.ext.ext_params, irq->u.ext.ext_params2); |
892 | atomic_set(&li->active, 1); | 974 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, |
975 | irq->u.ext.ext_params, | ||
976 | irq->u.ext.ext_params2, 2); | ||
977 | |||
978 | li->irq.ext = irq->u.ext; | ||
979 | set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); | ||
893 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 980 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
894 | return 0; | 981 | return 0; |
895 | } | 982 | } |
896 | 983 | ||
897 | static int __inject_extcall(struct kvm_vcpu *vcpu, | 984 | int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) |
898 | struct kvm_s390_interrupt *s390int, | ||
899 | struct kvm_s390_interrupt_info *inti) | ||
900 | { | 985 | { |
901 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 986 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
987 | struct kvm_s390_extcall_info *extcall = &li->irq.extcall; | ||
902 | 988 | ||
903 | VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", | 989 | VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", |
904 | s390int->parm); | 990 | irq->u.extcall.code); |
905 | if (s390int->parm & 0xffff0000) | 991 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, |
906 | return -EINVAL; | 992 | irq->u.extcall.code, 0, 2); |
907 | inti->extcall.code = s390int->parm; | 993 | |
908 | list_add_tail(&inti->list, &li->list); | 994 | *extcall = irq->u.extcall; |
909 | atomic_set(&li->active, 1); | 995 | __set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); |
910 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 996 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
911 | return 0; | 997 | return 0; |
912 | } | 998 | } |
913 | 999 | ||
914 | static int __inject_set_prefix(struct kvm_vcpu *vcpu, | 1000 | static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) |
915 | struct kvm_s390_interrupt *s390int, | ||
916 | struct kvm_s390_interrupt_info *inti) | ||
917 | { | 1001 | { |
918 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1002 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1003 | struct kvm_s390_prefix_info *prefix = &li->irq.prefix; | ||
919 | 1004 | ||
920 | VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", | 1005 | VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", |
921 | s390int->parm); | 1006 | prefix->address); |
922 | inti->prefix.address = s390int->parm; | 1007 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, |
923 | list_add_tail(&inti->list, &li->list); | 1008 | prefix->address, 0, 2); |
924 | atomic_set(&li->active, 1); | 1009 | |
1010 | *prefix = irq->u.prefix; | ||
1011 | set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); | ||
925 | return 0; | 1012 | return 0; |
926 | } | 1013 | } |
927 | 1014 | ||
928 | static int __inject_sigp_stop(struct kvm_vcpu *vcpu, | 1015 | static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) |
929 | struct kvm_s390_interrupt *s390int, | ||
930 | struct kvm_s390_interrupt_info *inti) | ||
931 | { | 1016 | { |
932 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1017 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
933 | 1018 | ||
934 | list_add_tail(&inti->list, &li->list); | 1019 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2); |
935 | atomic_set(&li->active, 1); | 1020 | |
936 | li->action_bits |= ACTION_STOP_ON_STOP; | 1021 | li->action_bits |= ACTION_STOP_ON_STOP; |
1022 | set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); | ||
937 | return 0; | 1023 | return 0; |
938 | } | 1024 | } |
939 | 1025 | ||
940 | static int __inject_sigp_restart(struct kvm_vcpu *vcpu, | 1026 | static int __inject_sigp_restart(struct kvm_vcpu *vcpu, |
941 | struct kvm_s390_interrupt *s390int, | 1027 | struct kvm_s390_irq *irq) |
942 | struct kvm_s390_interrupt_info *inti) | ||
943 | { | 1028 | { |
944 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1029 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
945 | 1030 | ||
946 | VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); | 1031 | VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type); |
947 | list_add_tail(&inti->list, &li->list); | 1032 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2); |
948 | atomic_set(&li->active, 1); | 1033 | |
1034 | set_bit(IRQ_PEND_RESTART, &li->pending_irqs); | ||
949 | return 0; | 1035 | return 0; |
950 | } | 1036 | } |
951 | 1037 | ||
952 | static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, | 1038 | static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, |
953 | struct kvm_s390_interrupt *s390int, | 1039 | struct kvm_s390_irq *irq) |
954 | struct kvm_s390_interrupt_info *inti) | ||
955 | { | 1040 | { |
956 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1041 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1042 | struct kvm_s390_emerg_info *emerg = &li->irq.emerg; | ||
957 | 1043 | ||
958 | VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm); | 1044 | VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", |
959 | if (s390int->parm & 0xffff0000) | 1045 | irq->u.emerg.code); |
960 | return -EINVAL; | 1046 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, |
961 | inti->emerg.code = s390int->parm; | 1047 | emerg->code, 0, 2); |
962 | list_add_tail(&inti->list, &li->list); | 1048 | |
963 | atomic_set(&li->active, 1); | 1049 | set_bit(emerg->code, li->sigp_emerg_pending); |
1050 | set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); | ||
964 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1051 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
965 | return 0; | 1052 | return 0; |
966 | } | 1053 | } |
967 | 1054 | ||
968 | static int __inject_mchk(struct kvm_vcpu *vcpu, | 1055 | static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) |
969 | struct kvm_s390_interrupt *s390int, | ||
970 | struct kvm_s390_interrupt_info *inti) | ||
971 | { | 1056 | { |
972 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1057 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1058 | struct kvm_s390_mchk_info *mchk = &li->irq.mchk; | ||
973 | 1059 | ||
974 | VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", | 1060 | VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", |
975 | s390int->parm64); | 1061 | mchk->mcic); |
976 | inti->mchk.mcic = s390int->parm64; | 1062 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, |
977 | list_add_tail(&inti->list, &li->list); | 1063 | mchk->mcic, 2); |
978 | atomic_set(&li->active, 1); | 1064 | |
1065 | /* | ||
1066 | * Combine mcic with previously injected machine checks and | ||
1067 | * indicate them all together as described in the Principles | ||
1068 | * of Operation, Chapter 11, Interruption action | ||
1069 | */ | ||
1070 | mchk->mcic |= irq->u.mchk.mcic; | ||
1071 | if (mchk->mcic & MCHK_EX_MASK) | ||
1072 | set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); | ||
1073 | else if (mchk->mcic & MCHK_REP_MASK) | ||
1074 | set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); | ||
979 | return 0; | 1075 | return 0; |
980 | } | 1076 | } |
981 | 1077 | ||
982 | static int __inject_ckc(struct kvm_vcpu *vcpu, | 1078 | static int __inject_ckc(struct kvm_vcpu *vcpu) |
983 | struct kvm_s390_interrupt *s390int, | ||
984 | struct kvm_s390_interrupt_info *inti) | ||
985 | { | 1079 | { |
986 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1080 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
987 | 1081 | ||
988 | VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); | 1082 | VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP); |
989 | list_add_tail(&inti->list, &li->list); | 1083 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, |
990 | atomic_set(&li->active, 1); | 1084 | 0, 0, 2); |
1085 | |||
1086 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | ||
991 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1087 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
992 | return 0; | 1088 | return 0; |
993 | } | 1089 | } |
994 | 1090 | ||
995 | static int __inject_cpu_timer(struct kvm_vcpu *vcpu, | 1091 | static int __inject_cpu_timer(struct kvm_vcpu *vcpu) |
996 | struct kvm_s390_interrupt *s390int, | ||
997 | struct kvm_s390_interrupt_info *inti) | ||
998 | { | 1092 | { |
999 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1093 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1000 | 1094 | ||
1001 | VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); | 1095 | VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER); |
1002 | list_add_tail(&inti->list, &li->list); | 1096 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, |
1003 | atomic_set(&li->active, 1); | 1097 | 0, 0, 2); |
1098 | |||
1099 | set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); | ||
1004 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1100 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
1005 | return 0; | 1101 | return 0; |
1006 | } | 1102 | } |
1007 | 1103 | ||
1104 | |||
1008 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 1105 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
1009 | u64 cr6, u64 schid) | 1106 | u64 cr6, u64 schid) |
1010 | { | 1107 | { |
@@ -1169,58 +1266,74 @@ void kvm_s390_reinject_io_int(struct kvm *kvm, | |||
1169 | __inject_vm(kvm, inti); | 1266 | __inject_vm(kvm, inti); |
1170 | } | 1267 | } |
1171 | 1268 | ||
1172 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | 1269 | int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, |
1173 | struct kvm_s390_interrupt *s390int) | 1270 | struct kvm_s390_irq *irq) |
1271 | { | ||
1272 | irq->type = s390int->type; | ||
1273 | switch (irq->type) { | ||
1274 | case KVM_S390_PROGRAM_INT: | ||
1275 | if (s390int->parm & 0xffff0000) | ||
1276 | return -EINVAL; | ||
1277 | irq->u.pgm.code = s390int->parm; | ||
1278 | break; | ||
1279 | case KVM_S390_SIGP_SET_PREFIX: | ||
1280 | irq->u.prefix.address = s390int->parm; | ||
1281 | break; | ||
1282 | case KVM_S390_INT_EXTERNAL_CALL: | ||
1283 | if (irq->u.extcall.code & 0xffff0000) | ||
1284 | return -EINVAL; | ||
1285 | irq->u.extcall.code = s390int->parm; | ||
1286 | break; | ||
1287 | case KVM_S390_INT_EMERGENCY: | ||
1288 | if (irq->u.emerg.code & 0xffff0000) | ||
1289 | return -EINVAL; | ||
1290 | irq->u.emerg.code = s390int->parm; | ||
1291 | break; | ||
1292 | case KVM_S390_MCHK: | ||
1293 | irq->u.mchk.mcic = s390int->parm64; | ||
1294 | break; | ||
1295 | } | ||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | ||
1174 | { | 1300 | { |
1175 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1301 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1176 | struct kvm_s390_interrupt_info *inti; | ||
1177 | int rc; | 1302 | int rc; |
1178 | 1303 | ||
1179 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
1180 | if (!inti) | ||
1181 | return -ENOMEM; | ||
1182 | |||
1183 | inti->type = s390int->type; | ||
1184 | |||
1185 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, | ||
1186 | s390int->parm, 0, 2); | ||
1187 | spin_lock(&li->lock); | 1304 | spin_lock(&li->lock); |
1188 | switch (inti->type) { | 1305 | switch (irq->type) { |
1189 | case KVM_S390_PROGRAM_INT: | 1306 | case KVM_S390_PROGRAM_INT: |
1190 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", | 1307 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", |
1191 | s390int->parm); | 1308 | irq->u.pgm.code); |
1192 | inti->pgm.code = s390int->parm; | 1309 | rc = __inject_prog(vcpu, irq); |
1193 | if (s390int->parm & 0xffff0000) | ||
1194 | rc = -EINVAL; | ||
1195 | else | ||
1196 | rc = __inject_prog_irq(vcpu, inti); | ||
1197 | break; | 1310 | break; |
1198 | case KVM_S390_SIGP_SET_PREFIX: | 1311 | case KVM_S390_SIGP_SET_PREFIX: |
1199 | rc = __inject_set_prefix(vcpu, s390int, inti); | 1312 | rc = __inject_set_prefix(vcpu, irq); |
1200 | break; | 1313 | break; |
1201 | case KVM_S390_SIGP_STOP: | 1314 | case KVM_S390_SIGP_STOP: |
1202 | rc = __inject_sigp_stop(vcpu, s390int, inti); | 1315 | rc = __inject_sigp_stop(vcpu, irq); |
1203 | break; | 1316 | break; |
1204 | case KVM_S390_RESTART: | 1317 | case KVM_S390_RESTART: |
1205 | rc = __inject_sigp_restart(vcpu, s390int, inti); | 1318 | rc = __inject_sigp_restart(vcpu, irq); |
1206 | break; | 1319 | break; |
1207 | case KVM_S390_INT_CLOCK_COMP: | 1320 | case KVM_S390_INT_CLOCK_COMP: |
1208 | rc = __inject_ckc(vcpu, s390int, inti); | 1321 | rc = __inject_ckc(vcpu); |
1209 | break; | 1322 | break; |
1210 | case KVM_S390_INT_CPU_TIMER: | 1323 | case KVM_S390_INT_CPU_TIMER: |
1211 | rc = __inject_cpu_timer(vcpu, s390int, inti); | 1324 | rc = __inject_cpu_timer(vcpu); |
1212 | break; | 1325 | break; |
1213 | case KVM_S390_INT_EXTERNAL_CALL: | 1326 | case KVM_S390_INT_EXTERNAL_CALL: |
1214 | rc = __inject_extcall(vcpu, s390int, inti); | 1327 | rc = __inject_extcall(vcpu, irq); |
1215 | break; | 1328 | break; |
1216 | case KVM_S390_INT_EMERGENCY: | 1329 | case KVM_S390_INT_EMERGENCY: |
1217 | rc = __inject_sigp_emergency(vcpu, s390int, inti); | 1330 | rc = __inject_sigp_emergency(vcpu, irq); |
1218 | break; | 1331 | break; |
1219 | case KVM_S390_MCHK: | 1332 | case KVM_S390_MCHK: |
1220 | rc = __inject_mchk(vcpu, s390int, inti); | 1333 | rc = __inject_mchk(vcpu, irq); |
1221 | break; | 1334 | break; |
1222 | case KVM_S390_INT_PFAULT_INIT: | 1335 | case KVM_S390_INT_PFAULT_INIT: |
1223 | rc = __inject_pfault_init(vcpu, s390int, inti); | 1336 | rc = __inject_pfault_init(vcpu, irq); |
1224 | break; | 1337 | break; |
1225 | case KVM_S390_INT_VIRTIO: | 1338 | case KVM_S390_INT_VIRTIO: |
1226 | case KVM_S390_INT_SERVICE: | 1339 | case KVM_S390_INT_SERVICE: |
@@ -1231,8 +1344,6 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
1231 | spin_unlock(&li->lock); | 1344 | spin_unlock(&li->lock); |
1232 | if (!rc) | 1345 | if (!rc) |
1233 | kvm_s390_vcpu_wakeup(vcpu); | 1346 | kvm_s390_vcpu_wakeup(vcpu); |
1234 | else | ||
1235 | kfree(inti); | ||
1236 | return rc; | 1347 | return rc; |
1237 | } | 1348 | } |
1238 | 1349 | ||
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 06878bdf0c6b..f66591eee9ca 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -719,7 +719,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
719 | } | 719 | } |
720 | 720 | ||
721 | spin_lock_init(&vcpu->arch.local_int.lock); | 721 | spin_lock_init(&vcpu->arch.local_int.lock); |
722 | INIT_LIST_HEAD(&vcpu->arch.local_int.list); | ||
723 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | 722 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; |
724 | vcpu->arch.local_int.wq = &vcpu->wq; | 723 | vcpu->arch.local_int.wq = &vcpu->wq; |
725 | vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; | 724 | vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; |
@@ -1122,13 +1121,15 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, | |||
1122 | unsigned long token) | 1121 | unsigned long token) |
1123 | { | 1122 | { |
1124 | struct kvm_s390_interrupt inti; | 1123 | struct kvm_s390_interrupt inti; |
1125 | inti.parm64 = token; | 1124 | struct kvm_s390_irq irq; |
1126 | 1125 | ||
1127 | if (start_token) { | 1126 | if (start_token) { |
1128 | inti.type = KVM_S390_INT_PFAULT_INIT; | 1127 | irq.u.ext.ext_params2 = token; |
1129 | WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti)); | 1128 | irq.type = KVM_S390_INT_PFAULT_INIT; |
1129 | WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); | ||
1130 | } else { | 1130 | } else { |
1131 | inti.type = KVM_S390_INT_PFAULT_DONE; | 1131 | inti.type = KVM_S390_INT_PFAULT_DONE; |
1132 | inti.parm64 = token; | ||
1132 | WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); | 1133 | WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); |
1133 | } | 1134 | } |
1134 | } | 1135 | } |
@@ -1622,11 +1623,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1622 | switch (ioctl) { | 1623 | switch (ioctl) { |
1623 | case KVM_S390_INTERRUPT: { | 1624 | case KVM_S390_INTERRUPT: { |
1624 | struct kvm_s390_interrupt s390int; | 1625 | struct kvm_s390_interrupt s390int; |
1626 | struct kvm_s390_irq s390irq; | ||
1625 | 1627 | ||
1626 | r = -EFAULT; | 1628 | r = -EFAULT; |
1627 | if (copy_from_user(&s390int, argp, sizeof(s390int))) | 1629 | if (copy_from_user(&s390int, argp, sizeof(s390int))) |
1628 | break; | 1630 | break; |
1629 | r = kvm_s390_inject_vcpu(vcpu, &s390int); | 1631 | if (s390int_to_s390irq(&s390int, &s390irq)) |
1632 | return -EINVAL; | ||
1633 | r = kvm_s390_inject_vcpu(vcpu, &s390irq); | ||
1630 | break; | 1634 | break; |
1631 | } | 1635 | } |
1632 | case KVM_S390_STORE_STATUS: | 1636 | case KVM_S390_STORE_STATUS: |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index ff8d9775b758..a8f3d9b71c11 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -142,7 +142,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm); | |||
142 | int __must_check kvm_s390_inject_vm(struct kvm *kvm, | 142 | int __must_check kvm_s390_inject_vm(struct kvm *kvm, |
143 | struct kvm_s390_interrupt *s390int); | 143 | struct kvm_s390_interrupt *s390int); |
144 | int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | 144 | int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, |
145 | struct kvm_s390_interrupt *s390int); | 145 | struct kvm_s390_irq *irq); |
146 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); | 146 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
147 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 147 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
148 | u64 cr6, u64 schid); | 148 | u64 cr6, u64 schid); |
@@ -224,6 +224,9 @@ static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc) | |||
224 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | 224 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); |
225 | } | 225 | } |
226 | 226 | ||
227 | int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, | ||
228 | struct kvm_s390_irq *s390irq); | ||
229 | |||
227 | /* implemented in interrupt.c */ | 230 | /* implemented in interrupt.c */ |
228 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | 231 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); |
229 | int psw_extint_disabled(struct kvm_vcpu *vcpu); | 232 | int psw_extint_disabled(struct kvm_vcpu *vcpu); |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index f7cd3f774f25..6651f9f73973 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -49,13 +49,13 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, | |||
49 | static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, | 49 | static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, |
50 | struct kvm_vcpu *dst_vcpu) | 50 | struct kvm_vcpu *dst_vcpu) |
51 | { | 51 | { |
52 | struct kvm_s390_interrupt s390int = { | 52 | struct kvm_s390_irq irq = { |
53 | .type = KVM_S390_INT_EMERGENCY, | 53 | .type = KVM_S390_INT_EMERGENCY, |
54 | .parm = vcpu->vcpu_id, | 54 | .u.emerg.code = vcpu->vcpu_id, |
55 | }; | 55 | }; |
56 | int rc = 0; | 56 | int rc = 0; |
57 | 57 | ||
58 | rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); | 58 | rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); |
59 | if (!rc) | 59 | if (!rc) |
60 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", | 60 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", |
61 | dst_vcpu->vcpu_id); | 61 | dst_vcpu->vcpu_id); |
@@ -98,13 +98,13 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, | |||
98 | static int __sigp_external_call(struct kvm_vcpu *vcpu, | 98 | static int __sigp_external_call(struct kvm_vcpu *vcpu, |
99 | struct kvm_vcpu *dst_vcpu) | 99 | struct kvm_vcpu *dst_vcpu) |
100 | { | 100 | { |
101 | struct kvm_s390_interrupt s390int = { | 101 | struct kvm_s390_irq irq = { |
102 | .type = KVM_S390_INT_EXTERNAL_CALL, | 102 | .type = KVM_S390_INT_EXTERNAL_CALL, |
103 | .parm = vcpu->vcpu_id, | 103 | .u.extcall.code = vcpu->vcpu_id, |
104 | }; | 104 | }; |
105 | int rc; | 105 | int rc; |
106 | 106 | ||
107 | rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); | 107 | rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); |
108 | if (!rc) | 108 | if (!rc) |
109 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", | 109 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", |
110 | dst_vcpu->vcpu_id); | 110 | dst_vcpu->vcpu_id); |
@@ -115,29 +115,20 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, | |||
115 | static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action) | 115 | static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action) |
116 | { | 116 | { |
117 | struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; | 117 | struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; |
118 | struct kvm_s390_interrupt_info *inti; | ||
119 | int rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 118 | int rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
120 | 119 | ||
121 | inti = kzalloc(sizeof(*inti), GFP_ATOMIC); | ||
122 | if (!inti) | ||
123 | return -ENOMEM; | ||
124 | inti->type = KVM_S390_SIGP_STOP; | ||
125 | |||
126 | spin_lock(&li->lock); | 120 | spin_lock(&li->lock); |
127 | if (li->action_bits & ACTION_STOP_ON_STOP) { | 121 | if (li->action_bits & ACTION_STOP_ON_STOP) { |
128 | /* another SIGP STOP is pending */ | 122 | /* another SIGP STOP is pending */ |
129 | kfree(inti); | ||
130 | rc = SIGP_CC_BUSY; | 123 | rc = SIGP_CC_BUSY; |
131 | goto out; | 124 | goto out; |
132 | } | 125 | } |
133 | if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { | 126 | if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { |
134 | kfree(inti); | ||
135 | if ((action & ACTION_STORE_ON_STOP) != 0) | 127 | if ((action & ACTION_STORE_ON_STOP) != 0) |
136 | rc = -ESHUTDOWN; | 128 | rc = -ESHUTDOWN; |
137 | goto out; | 129 | goto out; |
138 | } | 130 | } |
139 | list_add_tail(&inti->list, &li->list); | 131 | set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); |
140 | atomic_set(&li->active, 1); | ||
141 | li->action_bits |= action; | 132 | li->action_bits |= action; |
142 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | 133 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); |
143 | kvm_s390_vcpu_wakeup(dst_vcpu); | 134 | kvm_s390_vcpu_wakeup(dst_vcpu); |
@@ -207,7 +198,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, | |||
207 | u32 address, u64 *reg) | 198 | u32 address, u64 *reg) |
208 | { | 199 | { |
209 | struct kvm_s390_local_interrupt *li; | 200 | struct kvm_s390_local_interrupt *li; |
210 | struct kvm_s390_interrupt_info *inti; | ||
211 | int rc; | 201 | int rc; |
212 | 202 | ||
213 | li = &dst_vcpu->arch.local_int; | 203 | li = &dst_vcpu->arch.local_int; |
@@ -224,25 +214,17 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, | |||
224 | return SIGP_CC_STATUS_STORED; | 214 | return SIGP_CC_STATUS_STORED; |
225 | } | 215 | } |
226 | 216 | ||
227 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
228 | if (!inti) | ||
229 | return SIGP_CC_BUSY; | ||
230 | |||
231 | spin_lock(&li->lock); | 217 | spin_lock(&li->lock); |
232 | /* cpu must be in stopped state */ | 218 | /* cpu must be in stopped state */ |
233 | if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { | 219 | if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { |
234 | *reg &= 0xffffffff00000000UL; | 220 | *reg &= 0xffffffff00000000UL; |
235 | *reg |= SIGP_STATUS_INCORRECT_STATE; | 221 | *reg |= SIGP_STATUS_INCORRECT_STATE; |
236 | rc = SIGP_CC_STATUS_STORED; | 222 | rc = SIGP_CC_STATUS_STORED; |
237 | kfree(inti); | ||
238 | goto out_li; | 223 | goto out_li; |
239 | } | 224 | } |
240 | 225 | ||
241 | inti->type = KVM_S390_SIGP_SET_PREFIX; | 226 | li->irq.prefix.address = address; |
242 | inti->prefix.address = address; | 227 | set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); |
243 | |||
244 | list_add_tail(&inti->list, &li->list); | ||
245 | atomic_set(&li->active, 1); | ||
246 | kvm_s390_vcpu_wakeup(dst_vcpu); | 228 | kvm_s390_vcpu_wakeup(dst_vcpu); |
247 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 229 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
248 | 230 | ||