diff options
author | Jens Freimann <jfrei@linux.vnet.ibm.com> | 2013-07-03 09:18:35 -0400 |
---|---|---|
committer | Christian Borntraeger <borntraeger@de.ibm.com> | 2015-03-31 15:07:27 -0400 |
commit | 6d3da241416e6088f83a7ff1f37fb6bb518d9bc8 (patch) | |
tree | c10a62568354ed53722f735f8a4ad600d720b177 | |
parent | 94aa033efcac47b09db22cb561e135baf37b7887 (diff) |
KVM: s390: deliver floating interrupts in order of priority
This patch makes interrupt handling compliant with the z/Architecture
Principles of Operation with regard to interrupt priorities.
Add a bitmap for pending floating interrupts. Each bit relates to an
interrupt type and its list. A turned on bit indicates that a list
contains items (interrupts) which need to be delivered. When delivering
interrupts on a cpu we can merge the existing bitmap for cpu-local
interrupts and floating interrupts and have a single mechanism for
delivery.
Currently we have one list for all kinds of floating interrupts and a
corresponding spin lock. This patch adds a separate list per
interrupt type. Exceptions to this are service signal and machine check
interrupts, as there can be only one pending interrupt at a time.
Signed-off-by: Jens Freimann <jfrei@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
-rw-r--r-- | arch/s390/include/asm/kvm_host.h | 30 | ||||
-rw-r--r-- | arch/s390/kvm/interrupt.c | 832 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 4 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.h | 2 | ||||
-rw-r--r-- | arch/s390/kvm/priv.c | 9 |
5 files changed, 510 insertions, 367 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index b8d1e97fb201..d01fc588b5c3 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -344,6 +344,11 @@ enum irq_types { | |||
344 | IRQ_PEND_COUNT | 344 | IRQ_PEND_COUNT |
345 | }; | 345 | }; |
346 | 346 | ||
347 | /* We have 2M for virtio device descriptor pages. Smallest amount of | ||
348 | * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381 | ||
349 | */ | ||
350 | #define KVM_S390_MAX_VIRTIO_IRQS 87381 | ||
351 | |||
347 | /* | 352 | /* |
348 | * Repressible (non-floating) machine check interrupts | 353 | * Repressible (non-floating) machine check interrupts |
349 | * subclass bits in MCIC | 354 | * subclass bits in MCIC |
@@ -421,13 +426,32 @@ struct kvm_s390_local_interrupt { | |||
421 | unsigned long pending_irqs; | 426 | unsigned long pending_irqs; |
422 | }; | 427 | }; |
423 | 428 | ||
429 | #define FIRQ_LIST_IO_ISC_0 0 | ||
430 | #define FIRQ_LIST_IO_ISC_1 1 | ||
431 | #define FIRQ_LIST_IO_ISC_2 2 | ||
432 | #define FIRQ_LIST_IO_ISC_3 3 | ||
433 | #define FIRQ_LIST_IO_ISC_4 4 | ||
434 | #define FIRQ_LIST_IO_ISC_5 5 | ||
435 | #define FIRQ_LIST_IO_ISC_6 6 | ||
436 | #define FIRQ_LIST_IO_ISC_7 7 | ||
437 | #define FIRQ_LIST_PFAULT 8 | ||
438 | #define FIRQ_LIST_VIRTIO 9 | ||
439 | #define FIRQ_LIST_COUNT 10 | ||
440 | #define FIRQ_CNTR_IO 0 | ||
441 | #define FIRQ_CNTR_SERVICE 1 | ||
442 | #define FIRQ_CNTR_VIRTIO 2 | ||
443 | #define FIRQ_CNTR_PFAULT 3 | ||
444 | #define FIRQ_MAX_COUNT 4 | ||
445 | |||
424 | struct kvm_s390_float_interrupt { | 446 | struct kvm_s390_float_interrupt { |
447 | unsigned long pending_irqs; | ||
425 | spinlock_t lock; | 448 | spinlock_t lock; |
426 | struct list_head list; | 449 | struct list_head lists[FIRQ_LIST_COUNT]; |
427 | atomic_t active; | 450 | int counters[FIRQ_MAX_COUNT]; |
451 | struct kvm_s390_mchk_info mchk; | ||
452 | struct kvm_s390_ext_info srv_signal; | ||
428 | int next_rr_cpu; | 453 | int next_rr_cpu; |
429 | unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; | 454 | unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; |
430 | unsigned int irq_count; | ||
431 | }; | 455 | }; |
432 | 456 | ||
433 | struct kvm_hw_wp_info_arch { | 457 | struct kvm_hw_wp_info_arch { |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 5ebd500e6400..2872fdb4d01a 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/dis.h> | 22 | #include <asm/dis.h> |
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <asm/sclp.h> | 24 | #include <asm/sclp.h> |
25 | #include <asm/isc.h> | ||
25 | #include "kvm-s390.h" | 26 | #include "kvm-s390.h" |
26 | #include "gaccess.h" | 27 | #include "gaccess.h" |
27 | #include "trace-s390.h" | 28 | #include "trace-s390.h" |
@@ -34,11 +35,6 @@ | |||
34 | #define PFAULT_DONE 0x0680 | 35 | #define PFAULT_DONE 0x0680 |
35 | #define VIRTIO_PARAM 0x0d00 | 36 | #define VIRTIO_PARAM 0x0d00 |
36 | 37 | ||
37 | static int is_ioint(u64 type) | ||
38 | { | ||
39 | return ((type & 0xfffe0000u) != 0xfffe0000u); | ||
40 | } | ||
41 | |||
42 | int psw_extint_disabled(struct kvm_vcpu *vcpu) | 38 | int psw_extint_disabled(struct kvm_vcpu *vcpu) |
43 | { | 39 | { |
44 | return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); | 40 | return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); |
@@ -74,70 +70,25 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) | |||
74 | return 1; | 70 | return 1; |
75 | } | 71 | } |
76 | 72 | ||
77 | static u64 int_word_to_isc_bits(u32 int_word) | 73 | static inline int is_ioirq(unsigned long irq_type) |
78 | { | 74 | { |
79 | u8 isc = (int_word & 0x38000000) >> 27; | 75 | return ((irq_type >= IRQ_PEND_IO_ISC_0) && |
76 | (irq_type <= IRQ_PEND_IO_ISC_7)); | ||
77 | } | ||
80 | 78 | ||
79 | static uint64_t isc_to_isc_bits(int isc) | ||
80 | { | ||
81 | return (0x80 >> isc) << 24; | 81 | return (0x80 >> isc) << 24; |
82 | } | 82 | } |
83 | 83 | ||
84 | static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu, | 84 | static inline u8 int_word_to_isc(u32 int_word) |
85 | struct kvm_s390_interrupt_info *inti) | ||
86 | { | 85 | { |
87 | switch (inti->type) { | 86 | return (int_word & 0x38000000) >> 27; |
88 | case KVM_S390_INT_EXTERNAL_CALL: | 87 | } |
89 | if (psw_extint_disabled(vcpu)) | 88 | |
90 | return 0; | 89 | static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu) |
91 | if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) | 90 | { |
92 | return 1; | 91 | return vcpu->kvm->arch.float_int.pending_irqs; |
93 | return 0; | ||
94 | case KVM_S390_INT_EMERGENCY: | ||
95 | if (psw_extint_disabled(vcpu)) | ||
96 | return 0; | ||
97 | if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) | ||
98 | return 1; | ||
99 | return 0; | ||
100 | case KVM_S390_INT_CLOCK_COMP: | ||
101 | return ckc_interrupts_enabled(vcpu); | ||
102 | case KVM_S390_INT_CPU_TIMER: | ||
103 | if (psw_extint_disabled(vcpu)) | ||
104 | return 0; | ||
105 | if (vcpu->arch.sie_block->gcr[0] & 0x400ul) | ||
106 | return 1; | ||
107 | return 0; | ||
108 | case KVM_S390_INT_SERVICE: | ||
109 | case KVM_S390_INT_PFAULT_INIT: | ||
110 | case KVM_S390_INT_PFAULT_DONE: | ||
111 | case KVM_S390_INT_VIRTIO: | ||
112 | if (psw_extint_disabled(vcpu)) | ||
113 | return 0; | ||
114 | if (vcpu->arch.sie_block->gcr[0] & 0x200ul) | ||
115 | return 1; | ||
116 | return 0; | ||
117 | case KVM_S390_PROGRAM_INT: | ||
118 | case KVM_S390_SIGP_STOP: | ||
119 | case KVM_S390_SIGP_SET_PREFIX: | ||
120 | case KVM_S390_RESTART: | ||
121 | return 1; | ||
122 | case KVM_S390_MCHK: | ||
123 | if (psw_mchk_disabled(vcpu)) | ||
124 | return 0; | ||
125 | if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14) | ||
126 | return 1; | ||
127 | return 0; | ||
128 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
129 | if (psw_ioint_disabled(vcpu)) | ||
130 | return 0; | ||
131 | if (vcpu->arch.sie_block->gcr[6] & | ||
132 | int_word_to_isc_bits(inti->io.io_int_word)) | ||
133 | return 1; | ||
134 | return 0; | ||
135 | default: | ||
136 | printk(KERN_WARNING "illegal interrupt type %llx\n", | ||
137 | inti->type); | ||
138 | BUG(); | ||
139 | } | ||
140 | return 0; | ||
141 | } | 92 | } |
142 | 93 | ||
143 | static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) | 94 | static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) |
@@ -145,12 +96,31 @@ static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) | |||
145 | return vcpu->arch.local_int.pending_irqs; | 96 | return vcpu->arch.local_int.pending_irqs; |
146 | } | 97 | } |
147 | 98 | ||
148 | static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) | 99 | static unsigned long disable_iscs(struct kvm_vcpu *vcpu, |
100 | unsigned long active_mask) | ||
149 | { | 101 | { |
150 | unsigned long active_mask = pending_local_irqs(vcpu); | 102 | int i; |
103 | |||
104 | for (i = 0; i <= MAX_ISC; i++) | ||
105 | if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) | ||
106 | active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i)); | ||
107 | |||
108 | return active_mask; | ||
109 | } | ||
110 | |||
111 | static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) | ||
112 | { | ||
113 | unsigned long active_mask; | ||
114 | |||
115 | active_mask = pending_local_irqs(vcpu); | ||
116 | active_mask |= pending_floating_irqs(vcpu); | ||
151 | 117 | ||
152 | if (psw_extint_disabled(vcpu)) | 118 | if (psw_extint_disabled(vcpu)) |
153 | active_mask &= ~IRQ_PEND_EXT_MASK; | 119 | active_mask &= ~IRQ_PEND_EXT_MASK; |
120 | if (psw_ioint_disabled(vcpu)) | ||
121 | active_mask &= ~IRQ_PEND_IO_MASK; | ||
122 | else | ||
123 | active_mask = disable_iscs(vcpu, active_mask); | ||
154 | if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) | 124 | if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) |
155 | __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); | 125 | __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); |
156 | if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) | 126 | if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) |
@@ -159,8 +129,13 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) | |||
159 | __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); | 129 | __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); |
160 | if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) | 130 | if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) |
161 | __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); | 131 | __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); |
132 | if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) | ||
133 | __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); | ||
162 | if (psw_mchk_disabled(vcpu)) | 134 | if (psw_mchk_disabled(vcpu)) |
163 | active_mask &= ~IRQ_PEND_MCHK_MASK; | 135 | active_mask &= ~IRQ_PEND_MCHK_MASK; |
136 | if (!(vcpu->arch.sie_block->gcr[14] & | ||
137 | vcpu->kvm->arch.float_int.mchk.cr14)) | ||
138 | __clear_bit(IRQ_PEND_MCHK_REP, &active_mask); | ||
164 | 139 | ||
165 | /* | 140 | /* |
166 | * STOP irqs will never be actively delivered. They are triggered via | 141 | * STOP irqs will never be actively delivered. They are triggered via |
@@ -202,6 +177,16 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) | |||
202 | atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); | 177 | atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); |
203 | } | 178 | } |
204 | 179 | ||
180 | static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) | ||
181 | { | ||
182 | if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK)) | ||
183 | return; | ||
184 | else if (psw_ioint_disabled(vcpu)) | ||
185 | __set_cpuflag(vcpu, CPUSTAT_IO_INT); | ||
186 | else | ||
187 | vcpu->arch.sie_block->lctl |= LCTL_CR6; | ||
188 | } | ||
189 | |||
205 | static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) | 190 | static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) |
206 | { | 191 | { |
207 | if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) | 192 | if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) |
@@ -228,43 +213,15 @@ static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu) | |||
228 | __set_cpuflag(vcpu, CPUSTAT_STOP_INT); | 213 | __set_cpuflag(vcpu, CPUSTAT_STOP_INT); |
229 | } | 214 | } |
230 | 215 | ||
231 | /* Set interception request for non-deliverable local interrupts */ | 216 | /* Set interception request for non-deliverable interrupts */ |
232 | static void set_intercept_indicators_local(struct kvm_vcpu *vcpu) | 217 | static void set_intercept_indicators(struct kvm_vcpu *vcpu) |
233 | { | 218 | { |
219 | set_intercept_indicators_io(vcpu); | ||
234 | set_intercept_indicators_ext(vcpu); | 220 | set_intercept_indicators_ext(vcpu); |
235 | set_intercept_indicators_mchk(vcpu); | 221 | set_intercept_indicators_mchk(vcpu); |
236 | set_intercept_indicators_stop(vcpu); | 222 | set_intercept_indicators_stop(vcpu); |
237 | } | 223 | } |
238 | 224 | ||
239 | static void __set_intercept_indicator(struct kvm_vcpu *vcpu, | ||
240 | struct kvm_s390_interrupt_info *inti) | ||
241 | { | ||
242 | switch (inti->type) { | ||
243 | case KVM_S390_INT_SERVICE: | ||
244 | case KVM_S390_INT_PFAULT_DONE: | ||
245 | case KVM_S390_INT_VIRTIO: | ||
246 | if (psw_extint_disabled(vcpu)) | ||
247 | __set_cpuflag(vcpu, CPUSTAT_EXT_INT); | ||
248 | else | ||
249 | vcpu->arch.sie_block->lctl |= LCTL_CR0; | ||
250 | break; | ||
251 | case KVM_S390_MCHK: | ||
252 | if (psw_mchk_disabled(vcpu)) | ||
253 | vcpu->arch.sie_block->ictl |= ICTL_LPSW; | ||
254 | else | ||
255 | vcpu->arch.sie_block->lctl |= LCTL_CR14; | ||
256 | break; | ||
257 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
258 | if (psw_ioint_disabled(vcpu)) | ||
259 | __set_cpuflag(vcpu, CPUSTAT_IO_INT); | ||
260 | else | ||
261 | vcpu->arch.sie_block->lctl |= LCTL_CR6; | ||
262 | break; | ||
263 | default: | ||
264 | BUG(); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | static u16 get_ilc(struct kvm_vcpu *vcpu) | 225 | static u16 get_ilc(struct kvm_vcpu *vcpu) |
269 | { | 226 | { |
270 | switch (vcpu->arch.sie_block->icptcode) { | 227 | switch (vcpu->arch.sie_block->icptcode) { |
@@ -350,42 +307,72 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) | |||
350 | 307 | ||
351 | static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) | 308 | static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) |
352 | { | 309 | { |
310 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; | ||
353 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 311 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
354 | struct kvm_s390_mchk_info mchk; | 312 | struct kvm_s390_mchk_info mchk = {}; |
355 | unsigned long adtl_status_addr; | 313 | unsigned long adtl_status_addr; |
356 | int rc; | 314 | int deliver = 0; |
315 | int rc = 0; | ||
357 | 316 | ||
317 | spin_lock(&fi->lock); | ||
358 | spin_lock(&li->lock); | 318 | spin_lock(&li->lock); |
359 | mchk = li->irq.mchk; | 319 | if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) || |
320 | test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) { | ||
321 | /* | ||
322 | * If there was an exigent machine check pending, then any | ||
323 | * repressible machine checks that might have been pending | ||
324 | * are indicated along with it, so always clear bits for | ||
325 | * repressible and exigent interrupts | ||
326 | */ | ||
327 | mchk = li->irq.mchk; | ||
328 | clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); | ||
329 | clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); | ||
330 | memset(&li->irq.mchk, 0, sizeof(mchk)); | ||
331 | deliver = 1; | ||
332 | } | ||
360 | /* | 333 | /* |
361 | * If there was an exigent machine check pending, then any repressible | 334 | * We indicate floating repressible conditions along with |
362 | * machine checks that might have been pending are indicated along | 335 | * other pending conditions. Channel Report Pending and Channel |
363 | with it, so always clear both bits | 336 | * Subsystem damage are the only two and are indicated by |
337 | * bits in mcic and masked in cr14. | ||
364 | */ | 338 | */ |
365 | clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); | 339 | if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { |
366 | clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); | 340 | mchk.mcic |= fi->mchk.mcic; |
367 | memset(&li->irq.mchk, 0, sizeof(mchk)); | 341 | mchk.cr14 |= fi->mchk.cr14; |
342 | memset(&fi->mchk, 0, sizeof(mchk)); | ||
343 | deliver = 1; | ||
344 | } | ||
368 | spin_unlock(&li->lock); | 345 | spin_unlock(&li->lock); |
346 | spin_unlock(&fi->lock); | ||
369 | 347 | ||
370 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", | 348 | if (deliver) { |
371 | mchk.mcic); | 349 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", |
372 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, | 350 | mchk.mcic); |
373 | mchk.cr14, mchk.mcic); | 351 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
374 | 352 | KVM_S390_MCHK, | |
375 | rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); | 353 | mchk.cr14, mchk.mcic); |
376 | rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, | 354 | |
377 | &adtl_status_addr, sizeof(unsigned long)); | 355 | rc = kvm_s390_vcpu_store_status(vcpu, |
378 | rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr); | 356 | KVM_S390_STORE_STATUS_PREFIXED); |
379 | rc |= put_guest_lc(vcpu, mchk.mcic, | 357 | rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, |
380 | (u64 __user *) __LC_MCCK_CODE); | 358 | &adtl_status_addr, |
381 | rc |= put_guest_lc(vcpu, mchk.failing_storage_address, | 359 | sizeof(unsigned long)); |
382 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); | 360 | rc |= kvm_s390_vcpu_store_adtl_status(vcpu, |
383 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, | 361 | adtl_status_addr); |
384 | &mchk.fixed_logout, sizeof(mchk.fixed_logout)); | 362 | rc |= put_guest_lc(vcpu, mchk.mcic, |
385 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | 363 | (u64 __user *) __LC_MCCK_CODE); |
386 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 364 | rc |= put_guest_lc(vcpu, mchk.failing_storage_address, |
387 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | 365 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); |
388 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 366 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, |
367 | &mchk.fixed_logout, | ||
368 | sizeof(mchk.fixed_logout)); | ||
369 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | ||
370 | &vcpu->arch.sie_block->gpsw, | ||
371 | sizeof(psw_t)); | ||
372 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | ||
373 | &vcpu->arch.sie_block->gpsw, | ||
374 | sizeof(psw_t)); | ||
375 | } | ||
389 | return rc ? -EFAULT : 0; | 376 | return rc ? -EFAULT : 0; |
390 | } | 377 | } |
391 | 378 | ||
@@ -597,16 +584,27 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
597 | return rc ? -EFAULT : 0; | 584 | return rc ? -EFAULT : 0; |
598 | } | 585 | } |
599 | 586 | ||
600 | static int __must_check __deliver_service(struct kvm_vcpu *vcpu, | 587 | static int __must_check __deliver_service(struct kvm_vcpu *vcpu) |
601 | struct kvm_s390_interrupt_info *inti) | ||
602 | { | 588 | { |
603 | int rc; | 589 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
590 | struct kvm_s390_ext_info ext; | ||
591 | int rc = 0; | ||
592 | |||
593 | spin_lock(&fi->lock); | ||
594 | if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { | ||
595 | spin_unlock(&fi->lock); | ||
596 | return 0; | ||
597 | } | ||
598 | ext = fi->srv_signal; | ||
599 | memset(&fi->srv_signal, 0, sizeof(ext)); | ||
600 | clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); | ||
601 | spin_unlock(&fi->lock); | ||
604 | 602 | ||
605 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", | 603 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", |
606 | inti->ext.ext_params); | 604 | ext.ext_params); |
607 | vcpu->stat.deliver_service_signal++; | 605 | vcpu->stat.deliver_service_signal++; |
608 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 606 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, |
609 | inti->ext.ext_params, 0); | 607 | ext.ext_params, 0); |
610 | 608 | ||
611 | rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); | 609 | rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); |
612 | rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); | 610 | rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
@@ -614,106 +612,146 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu, | |||
614 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 612 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
615 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 613 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
616 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 614 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
617 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | 615 | rc |= put_guest_lc(vcpu, ext.ext_params, |
618 | (u32 *)__LC_EXT_PARAMS); | 616 | (u32 *)__LC_EXT_PARAMS); |
617 | |||
619 | return rc ? -EFAULT : 0; | 618 | return rc ? -EFAULT : 0; |
620 | } | 619 | } |
621 | 620 | ||
622 | static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu, | 621 | static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) |
623 | struct kvm_s390_interrupt_info *inti) | ||
624 | { | 622 | { |
625 | int rc; | 623 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
624 | struct kvm_s390_interrupt_info *inti; | ||
625 | int rc = 0; | ||
626 | 626 | ||
627 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 627 | spin_lock(&fi->lock); |
628 | KVM_S390_INT_PFAULT_DONE, 0, | 628 | inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT], |
629 | inti->ext.ext_params2); | 629 | struct kvm_s390_interrupt_info, |
630 | list); | ||
631 | if (inti) { | ||
632 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
633 | KVM_S390_INT_PFAULT_DONE, 0, | ||
634 | inti->ext.ext_params2); | ||
635 | list_del(&inti->list); | ||
636 | fi->counters[FIRQ_CNTR_PFAULT] -= 1; | ||
637 | } | ||
638 | if (list_empty(&fi->lists[FIRQ_LIST_PFAULT])) | ||
639 | clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); | ||
640 | spin_unlock(&fi->lock); | ||
630 | 641 | ||
631 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); | 642 | if (inti) { |
632 | rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR); | 643 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, |
633 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | 644 | (u16 *)__LC_EXT_INT_CODE); |
634 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 645 | rc |= put_guest_lc(vcpu, PFAULT_DONE, |
635 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 646 | (u16 *)__LC_EXT_CPU_ADDR); |
636 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 647 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
637 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | 648 | &vcpu->arch.sie_block->gpsw, |
638 | (u64 *)__LC_EXT_PARAMS2); | 649 | sizeof(psw_t)); |
650 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
651 | &vcpu->arch.sie_block->gpsw, | ||
652 | sizeof(psw_t)); | ||
653 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | ||
654 | (u64 *)__LC_EXT_PARAMS2); | ||
655 | kfree(inti); | ||
656 | } | ||
639 | return rc ? -EFAULT : 0; | 657 | return rc ? -EFAULT : 0; |
640 | } | 658 | } |
641 | 659 | ||
642 | static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu, | 660 | static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) |
643 | struct kvm_s390_interrupt_info *inti) | ||
644 | { | 661 | { |
645 | int rc; | 662 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
663 | struct kvm_s390_interrupt_info *inti; | ||
664 | int rc = 0; | ||
646 | 665 | ||
647 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", | 666 | spin_lock(&fi->lock); |
648 | inti->ext.ext_params, inti->ext.ext_params2); | 667 | inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO], |
649 | vcpu->stat.deliver_virtio_interrupt++; | 668 | struct kvm_s390_interrupt_info, |
650 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 669 | list); |
651 | inti->ext.ext_params, | 670 | if (inti) { |
652 | inti->ext.ext_params2); | 671 | VCPU_EVENT(vcpu, 4, |
672 | "interrupt: virtio parm:%x,parm64:%llx", | ||
673 | inti->ext.ext_params, inti->ext.ext_params2); | ||
674 | vcpu->stat.deliver_virtio_interrupt++; | ||
675 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
676 | inti->type, | ||
677 | inti->ext.ext_params, | ||
678 | inti->ext.ext_params2); | ||
679 | list_del(&inti->list); | ||
680 | fi->counters[FIRQ_CNTR_VIRTIO] -= 1; | ||
681 | } | ||
682 | if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO])) | ||
683 | clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); | ||
684 | spin_unlock(&fi->lock); | ||
653 | 685 | ||
654 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); | 686 | if (inti) { |
655 | rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR); | 687 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, |
656 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | 688 | (u16 *)__LC_EXT_INT_CODE); |
657 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 689 | rc |= put_guest_lc(vcpu, VIRTIO_PARAM, |
658 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 690 | (u16 *)__LC_EXT_CPU_ADDR); |
659 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 691 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
660 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | 692 | &vcpu->arch.sie_block->gpsw, |
661 | (u32 *)__LC_EXT_PARAMS); | 693 | sizeof(psw_t)); |
662 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | 694 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
663 | (u64 *)__LC_EXT_PARAMS2); | 695 | &vcpu->arch.sie_block->gpsw, |
696 | sizeof(psw_t)); | ||
697 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | ||
698 | (u32 *)__LC_EXT_PARAMS); | ||
699 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | ||
700 | (u64 *)__LC_EXT_PARAMS2); | ||
701 | kfree(inti); | ||
702 | } | ||
664 | return rc ? -EFAULT : 0; | 703 | return rc ? -EFAULT : 0; |
665 | } | 704 | } |
666 | 705 | ||
667 | static int __must_check __deliver_io(struct kvm_vcpu *vcpu, | 706 | static int __must_check __deliver_io(struct kvm_vcpu *vcpu, |
668 | struct kvm_s390_interrupt_info *inti) | 707 | unsigned long irq_type) |
669 | { | 708 | { |
670 | int rc; | 709 | struct list_head *isc_list; |
710 | struct kvm_s390_float_interrupt *fi; | ||
711 | struct kvm_s390_interrupt_info *inti = NULL; | ||
712 | int rc = 0; | ||
671 | 713 | ||
672 | VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); | 714 | fi = &vcpu->kvm->arch.float_int; |
673 | vcpu->stat.deliver_io_int++; | ||
674 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | ||
675 | ((__u32)inti->io.subchannel_id << 16) | | ||
676 | inti->io.subchannel_nr, | ||
677 | ((__u64)inti->io.io_int_parm << 32) | | ||
678 | inti->io.io_int_word); | ||
679 | |||
680 | rc = put_guest_lc(vcpu, inti->io.subchannel_id, | ||
681 | (u16 *)__LC_SUBCHANNEL_ID); | ||
682 | rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, | ||
683 | (u16 *)__LC_SUBCHANNEL_NR); | ||
684 | rc |= put_guest_lc(vcpu, inti->io.io_int_parm, | ||
685 | (u32 *)__LC_IO_INT_PARM); | ||
686 | rc |= put_guest_lc(vcpu, inti->io.io_int_word, | ||
687 | (u32 *)__LC_IO_INT_WORD); | ||
688 | rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, | ||
689 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
690 | rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, | ||
691 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
692 | return rc ? -EFAULT : 0; | ||
693 | } | ||
694 | 715 | ||
695 | static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu, | 716 | spin_lock(&fi->lock); |
696 | struct kvm_s390_interrupt_info *inti) | 717 | isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0]; |
697 | { | 718 | inti = list_first_entry_or_null(isc_list, |
698 | struct kvm_s390_mchk_info *mchk = &inti->mchk; | 719 | struct kvm_s390_interrupt_info, |
699 | int rc; | 720 | list); |
721 | if (inti) { | ||
722 | VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); | ||
723 | vcpu->stat.deliver_io_int++; | ||
724 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
725 | inti->type, | ||
726 | ((__u32)inti->io.subchannel_id << 16) | | ||
727 | inti->io.subchannel_nr, | ||
728 | ((__u64)inti->io.io_int_parm << 32) | | ||
729 | inti->io.io_int_word); | ||
730 | list_del(&inti->list); | ||
731 | fi->counters[FIRQ_CNTR_IO] -= 1; | ||
732 | } | ||
733 | if (list_empty(isc_list)) | ||
734 | clear_bit(irq_type, &fi->pending_irqs); | ||
735 | spin_unlock(&fi->lock); | ||
736 | |||
737 | if (inti) { | ||
738 | rc = put_guest_lc(vcpu, inti->io.subchannel_id, | ||
739 | (u16 *)__LC_SUBCHANNEL_ID); | ||
740 | rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, | ||
741 | (u16 *)__LC_SUBCHANNEL_NR); | ||
742 | rc |= put_guest_lc(vcpu, inti->io.io_int_parm, | ||
743 | (u32 *)__LC_IO_INT_PARM); | ||
744 | rc |= put_guest_lc(vcpu, inti->io.io_int_word, | ||
745 | (u32 *)__LC_IO_INT_WORD); | ||
746 | rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, | ||
747 | &vcpu->arch.sie_block->gpsw, | ||
748 | sizeof(psw_t)); | ||
749 | rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, | ||
750 | &vcpu->arch.sie_block->gpsw, | ||
751 | sizeof(psw_t)); | ||
752 | kfree(inti); | ||
753 | } | ||
700 | 754 | ||
701 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", | ||
702 | mchk->mcic); | ||
703 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, | ||
704 | mchk->cr14, mchk->mcic); | ||
705 | |||
706 | rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); | ||
707 | rc |= put_guest_lc(vcpu, mchk->mcic, | ||
708 | (u64 __user *) __LC_MCCK_CODE); | ||
709 | rc |= put_guest_lc(vcpu, mchk->failing_storage_address, | ||
710 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); | ||
711 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, | ||
712 | &mchk->fixed_logout, sizeof(mchk->fixed_logout)); | ||
713 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | ||
714 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
715 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | ||
716 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
717 | return rc ? -EFAULT : 0; | 755 | return rc ? -EFAULT : 0; |
718 | } | 756 | } |
719 | 757 | ||
@@ -721,6 +759,7 @@ typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); | |||
721 | 759 | ||
722 | static const deliver_irq_t deliver_irq_funcs[] = { | 760 | static const deliver_irq_t deliver_irq_funcs[] = { |
723 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, | 761 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, |
762 | [IRQ_PEND_MCHK_REP] = __deliver_machine_check, | ||
724 | [IRQ_PEND_PROG] = __deliver_prog, | 763 | [IRQ_PEND_PROG] = __deliver_prog, |
725 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, | 764 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, |
726 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, | 765 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, |
@@ -729,36 +768,11 @@ static const deliver_irq_t deliver_irq_funcs[] = { | |||
729 | [IRQ_PEND_RESTART] = __deliver_restart, | 768 | [IRQ_PEND_RESTART] = __deliver_restart, |
730 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, | 769 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, |
731 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, | 770 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, |
771 | [IRQ_PEND_EXT_SERVICE] = __deliver_service, | ||
772 | [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done, | ||
773 | [IRQ_PEND_VIRTIO] = __deliver_virtio, | ||
732 | }; | 774 | }; |
733 | 775 | ||
734 | static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu, | ||
735 | struct kvm_s390_interrupt_info *inti) | ||
736 | { | ||
737 | int rc; | ||
738 | |||
739 | switch (inti->type) { | ||
740 | case KVM_S390_INT_SERVICE: | ||
741 | rc = __deliver_service(vcpu, inti); | ||
742 | break; | ||
743 | case KVM_S390_INT_PFAULT_DONE: | ||
744 | rc = __deliver_pfault_done(vcpu, inti); | ||
745 | break; | ||
746 | case KVM_S390_INT_VIRTIO: | ||
747 | rc = __deliver_virtio(vcpu, inti); | ||
748 | break; | ||
749 | case KVM_S390_MCHK: | ||
750 | rc = __deliver_mchk_floating(vcpu, inti); | ||
751 | break; | ||
752 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
753 | rc = __deliver_io(vcpu, inti); | ||
754 | break; | ||
755 | default: | ||
756 | BUG(); | ||
757 | } | ||
758 | |||
759 | return rc; | ||
760 | } | ||
761 | |||
762 | /* Check whether an external call is pending (deliverable or not) */ | 776 | /* Check whether an external call is pending (deliverable or not) */ |
763 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) | 777 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) |
764 | { | 778 | { |
@@ -774,21 +788,9 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) | |||
774 | 788 | ||
775 | int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) | 789 | int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) |
776 | { | 790 | { |
777 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; | ||
778 | struct kvm_s390_interrupt_info *inti; | ||
779 | int rc; | 791 | int rc; |
780 | 792 | ||
781 | rc = !!deliverable_local_irqs(vcpu); | 793 | rc = !!deliverable_irqs(vcpu); |
782 | |||
783 | if ((!rc) && atomic_read(&fi->active)) { | ||
784 | spin_lock(&fi->lock); | ||
785 | list_for_each_entry(inti, &fi->list, list) | ||
786 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
787 | rc = 1; | ||
788 | break; | ||
789 | } | ||
790 | spin_unlock(&fi->lock); | ||
791 | } | ||
792 | 794 | ||
793 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) | 795 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) |
794 | rc = 1; | 796 | rc = 1; |
@@ -907,13 +909,10 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
907 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | 909 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) |
908 | { | 910 | { |
909 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 911 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
910 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; | ||
911 | struct kvm_s390_interrupt_info *n, *inti = NULL; | ||
912 | deliver_irq_t func; | 912 | deliver_irq_t func; |
913 | int deliver; | ||
914 | int rc = 0; | 913 | int rc = 0; |
915 | unsigned long irq_type; | 914 | unsigned long irq_type; |
916 | unsigned long deliverable_irqs; | 915 | unsigned long irqs; |
917 | 916 | ||
918 | __reset_intercept_indicators(vcpu); | 917 | __reset_intercept_indicators(vcpu); |
919 | 918 | ||
@@ -923,44 +922,27 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
923 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | 922 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
924 | 923 | ||
925 | do { | 924 | do { |
926 | deliverable_irqs = deliverable_local_irqs(vcpu); | 925 | irqs = deliverable_irqs(vcpu); |
927 | /* bits are in the order of interrupt priority */ | 926 | /* bits are in the order of interrupt priority */ |
928 | irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT); | 927 | irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT); |
929 | if (irq_type == IRQ_PEND_COUNT) | 928 | if (irq_type == IRQ_PEND_COUNT) |
930 | break; | 929 | break; |
931 | func = deliver_irq_funcs[irq_type]; | 930 | if (is_ioirq(irq_type)) { |
932 | if (!func) { | 931 | rc = __deliver_io(vcpu, irq_type); |
933 | WARN_ON_ONCE(func == NULL); | 932 | } else { |
934 | clear_bit(irq_type, &li->pending_irqs); | 933 | func = deliver_irq_funcs[irq_type]; |
935 | continue; | 934 | if (!func) { |
935 | WARN_ON_ONCE(func == NULL); | ||
936 | clear_bit(irq_type, &li->pending_irqs); | ||
937 | continue; | ||
938 | } | ||
939 | rc = func(vcpu); | ||
936 | } | 940 | } |
937 | rc = func(vcpu); | 941 | if (rc) |
938 | } while (!rc && irq_type != IRQ_PEND_COUNT); | 942 | break; |
939 | 943 | } while (!rc); | |
940 | set_intercept_indicators_local(vcpu); | ||
941 | 944 | ||
942 | if (!rc && atomic_read(&fi->active)) { | 945 | set_intercept_indicators(vcpu); |
943 | do { | ||
944 | deliver = 0; | ||
945 | spin_lock(&fi->lock); | ||
946 | list_for_each_entry_safe(inti, n, &fi->list, list) { | ||
947 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
948 | list_del(&inti->list); | ||
949 | fi->irq_count--; | ||
950 | deliver = 1; | ||
951 | break; | ||
952 | } | ||
953 | __set_intercept_indicator(vcpu, inti); | ||
954 | } | ||
955 | if (list_empty(&fi->list)) | ||
956 | atomic_set(&fi->active, 0); | ||
957 | spin_unlock(&fi->lock); | ||
958 | if (deliver) { | ||
959 | rc = __deliver_floating_interrupt(vcpu, inti); | ||
960 | kfree(inti); | ||
961 | } | ||
962 | } while (!rc && deliver); | ||
963 | } | ||
964 | 946 | ||
965 | return rc; | 947 | return rc; |
966 | } | 948 | } |
@@ -1195,80 +1177,182 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) | |||
1195 | return 0; | 1177 | return 0; |
1196 | } | 1178 | } |
1197 | 1179 | ||
1180 | static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm, | ||
1181 | int isc, u32 schid) | ||
1182 | { | ||
1183 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1184 | struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; | ||
1185 | struct kvm_s390_interrupt_info *iter; | ||
1186 | u16 id = (schid & 0xffff0000U) >> 16; | ||
1187 | u16 nr = schid & 0x0000ffffU; | ||
1198 | 1188 | ||
1189 | spin_lock(&fi->lock); | ||
1190 | list_for_each_entry(iter, isc_list, list) { | ||
1191 | if (schid && (id != iter->io.subchannel_id || | ||
1192 | nr != iter->io.subchannel_nr)) | ||
1193 | continue; | ||
1194 | /* found an appropriate entry */ | ||
1195 | list_del_init(&iter->list); | ||
1196 | fi->counters[FIRQ_CNTR_IO] -= 1; | ||
1197 | if (list_empty(isc_list)) | ||
1198 | clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); | ||
1199 | spin_unlock(&fi->lock); | ||
1200 | return iter; | ||
1201 | } | ||
1202 | spin_unlock(&fi->lock); | ||
1203 | return NULL; | ||
1204 | } | ||
1205 | |||
1206 | /* | ||
1207 | * Dequeue and return an I/O interrupt matching any of the interruption | ||
1208 | * subclasses as designated by the isc mask in cr6 and the schid (if != 0). | ||
1209 | */ | ||
1199 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 1210 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
1200 | u64 cr6, u64 schid) | 1211 | u64 isc_mask, u32 schid) |
1212 | { | ||
1213 | struct kvm_s390_interrupt_info *inti = NULL; | ||
1214 | int isc; | ||
1215 | |||
1216 | for (isc = 0; isc <= MAX_ISC && !inti; isc++) { | ||
1217 | if (isc_mask & isc_to_isc_bits(isc)) | ||
1218 | inti = get_io_int(kvm, isc, schid); | ||
1219 | } | ||
1220 | return inti; | ||
1221 | } | ||
1222 | |||
1223 | #define SCCB_MASK 0xFFFFFFF8 | ||
1224 | #define SCCB_EVENT_PENDING 0x3 | ||
1225 | |||
1226 | static int __inject_service(struct kvm *kvm, | ||
1227 | struct kvm_s390_interrupt_info *inti) | ||
1228 | { | ||
1229 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1230 | |||
1231 | spin_lock(&fi->lock); | ||
1232 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; | ||
1233 | /* | ||
1234 | * Early versions of the QEMU s390 bios will inject several | ||
1235 | * service interrupts after another without handling a | ||
1236 | * condition code indicating busy. | ||
1237 | * We will silently ignore those superfluous sccb values. | ||
1238 | * A future version of QEMU will take care of serialization | ||
1239 | * of servc requests | ||
1240 | */ | ||
1241 | if (fi->srv_signal.ext_params & SCCB_MASK) | ||
1242 | goto out; | ||
1243 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK; | ||
1244 | set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); | ||
1245 | out: | ||
1246 | spin_unlock(&fi->lock); | ||
1247 | kfree(inti); | ||
1248 | return 0; | ||
1249 | } | ||
1250 | |||
1251 | static int __inject_virtio(struct kvm *kvm, | ||
1252 | struct kvm_s390_interrupt_info *inti) | ||
1253 | { | ||
1254 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1255 | |||
1256 | spin_lock(&fi->lock); | ||
1257 | if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { | ||
1258 | spin_unlock(&fi->lock); | ||
1259 | return -EBUSY; | ||
1260 | } | ||
1261 | fi->counters[FIRQ_CNTR_VIRTIO] += 1; | ||
1262 | list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]); | ||
1263 | set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); | ||
1264 | spin_unlock(&fi->lock); | ||
1265 | return 0; | ||
1266 | } | ||
1267 | |||
1268 | static int __inject_pfault_done(struct kvm *kvm, | ||
1269 | struct kvm_s390_interrupt_info *inti) | ||
1270 | { | ||
1271 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1272 | |||
1273 | spin_lock(&fi->lock); | ||
1274 | if (fi->counters[FIRQ_CNTR_PFAULT] >= | ||
1275 | (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { | ||
1276 | spin_unlock(&fi->lock); | ||
1277 | return -EBUSY; | ||
1278 | } | ||
1279 | fi->counters[FIRQ_CNTR_PFAULT] += 1; | ||
1280 | list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]); | ||
1281 | set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); | ||
1282 | spin_unlock(&fi->lock); | ||
1283 | return 0; | ||
1284 | } | ||
1285 | |||
1286 | #define CR_PENDING_SUBCLASS 28 | ||
1287 | static int __inject_float_mchk(struct kvm *kvm, | ||
1288 | struct kvm_s390_interrupt_info *inti) | ||
1289 | { | ||
1290 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1291 | |||
1292 | spin_lock(&fi->lock); | ||
1293 | fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); | ||
1294 | fi->mchk.mcic |= inti->mchk.mcic; | ||
1295 | set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs); | ||
1296 | spin_unlock(&fi->lock); | ||
1297 | kfree(inti); | ||
1298 | return 0; | ||
1299 | } | ||
1300 | |||
1301 | static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | ||
1201 | { | 1302 | { |
1202 | struct kvm_s390_float_interrupt *fi; | 1303 | struct kvm_s390_float_interrupt *fi; |
1203 | struct kvm_s390_interrupt_info *inti, *iter; | 1304 | struct list_head *list; |
1305 | int isc; | ||
1204 | 1306 | ||
1205 | if ((!schid && !cr6) || (schid && cr6)) | ||
1206 | return NULL; | ||
1207 | fi = &kvm->arch.float_int; | 1307 | fi = &kvm->arch.float_int; |
1208 | spin_lock(&fi->lock); | 1308 | spin_lock(&fi->lock); |
1209 | inti = NULL; | 1309 | if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) { |
1210 | list_for_each_entry(iter, &fi->list, list) { | 1310 | spin_unlock(&fi->lock); |
1211 | if (!is_ioint(iter->type)) | 1311 | return -EBUSY; |
1212 | continue; | ||
1213 | if (cr6 && | ||
1214 | ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0)) | ||
1215 | continue; | ||
1216 | if (schid) { | ||
1217 | if (((schid & 0x00000000ffff0000) >> 16) != | ||
1218 | iter->io.subchannel_id) | ||
1219 | continue; | ||
1220 | if ((schid & 0x000000000000ffff) != | ||
1221 | iter->io.subchannel_nr) | ||
1222 | continue; | ||
1223 | } | ||
1224 | inti = iter; | ||
1225 | break; | ||
1226 | } | ||
1227 | if (inti) { | ||
1228 | list_del_init(&inti->list); | ||
1229 | fi->irq_count--; | ||
1230 | } | 1312 | } |
1231 | if (list_empty(&fi->list)) | 1313 | fi->counters[FIRQ_CNTR_IO] += 1; |
1232 | atomic_set(&fi->active, 0); | 1314 | |
1315 | isc = int_word_to_isc(inti->io.io_int_word); | ||
1316 | list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; | ||
1317 | list_add_tail(&inti->list, list); | ||
1318 | set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); | ||
1233 | spin_unlock(&fi->lock); | 1319 | spin_unlock(&fi->lock); |
1234 | return inti; | 1320 | return 0; |
1235 | } | 1321 | } |
1236 | 1322 | ||
1237 | static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | 1323 | static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) |
1238 | { | 1324 | { |
1239 | struct kvm_s390_local_interrupt *li; | 1325 | struct kvm_s390_local_interrupt *li; |
1240 | struct kvm_s390_float_interrupt *fi; | 1326 | struct kvm_s390_float_interrupt *fi; |
1241 | struct kvm_s390_interrupt_info *iter; | ||
1242 | struct kvm_vcpu *dst_vcpu = NULL; | 1327 | struct kvm_vcpu *dst_vcpu = NULL; |
1243 | int sigcpu; | 1328 | int sigcpu; |
1244 | int rc = 0; | 1329 | u64 type = READ_ONCE(inti->type); |
1330 | int rc; | ||
1245 | 1331 | ||
1246 | fi = &kvm->arch.float_int; | 1332 | fi = &kvm->arch.float_int; |
1247 | spin_lock(&fi->lock); | 1333 | |
1248 | if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { | 1334 | switch (type) { |
1335 | case KVM_S390_MCHK: | ||
1336 | rc = __inject_float_mchk(kvm, inti); | ||
1337 | break; | ||
1338 | case KVM_S390_INT_VIRTIO: | ||
1339 | rc = __inject_virtio(kvm, inti); | ||
1340 | break; | ||
1341 | case KVM_S390_INT_SERVICE: | ||
1342 | rc = __inject_service(kvm, inti); | ||
1343 | break; | ||
1344 | case KVM_S390_INT_PFAULT_DONE: | ||
1345 | rc = __inject_pfault_done(kvm, inti); | ||
1346 | break; | ||
1347 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
1348 | rc = __inject_io(kvm, inti); | ||
1349 | break; | ||
1350 | default: | ||
1249 | rc = -EINVAL; | 1351 | rc = -EINVAL; |
1250 | goto unlock_fi; | ||
1251 | } | 1352 | } |
1252 | fi->irq_count++; | 1353 | if (rc) |
1253 | if (!is_ioint(inti->type)) { | 1354 | return rc; |
1254 | list_add_tail(&inti->list, &fi->list); | ||
1255 | } else { | ||
1256 | u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); | ||
1257 | 1355 | ||
1258 | /* Keep I/O interrupts sorted in isc order. */ | ||
1259 | list_for_each_entry(iter, &fi->list, list) { | ||
1260 | if (!is_ioint(iter->type)) | ||
1261 | continue; | ||
1262 | if (int_word_to_isc_bits(iter->io.io_int_word) | ||
1263 | <= isc_bits) | ||
1264 | continue; | ||
1265 | break; | ||
1266 | } | ||
1267 | list_add_tail(&inti->list, &iter->list); | ||
1268 | } | ||
1269 | atomic_set(&fi->active, 1); | ||
1270 | if (atomic_read(&kvm->online_vcpus) == 0) | ||
1271 | goto unlock_fi; | ||
1272 | sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); | 1356 | sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); |
1273 | if (sigcpu == KVM_MAX_VCPUS) { | 1357 | if (sigcpu == KVM_MAX_VCPUS) { |
1274 | do { | 1358 | do { |
@@ -1280,7 +1364,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
1280 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); | 1364 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); |
1281 | li = &dst_vcpu->arch.local_int; | 1365 | li = &dst_vcpu->arch.local_int; |
1282 | spin_lock(&li->lock); | 1366 | spin_lock(&li->lock); |
1283 | switch (inti->type) { | 1367 | switch (type) { |
1284 | case KVM_S390_MCHK: | 1368 | case KVM_S390_MCHK: |
1285 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | 1369 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); |
1286 | break; | 1370 | break; |
@@ -1293,9 +1377,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
1293 | } | 1377 | } |
1294 | spin_unlock(&li->lock); | 1378 | spin_unlock(&li->lock); |
1295 | kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); | 1379 | kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); |
1296 | unlock_fi: | 1380 | return 0; |
1297 | spin_unlock(&fi->lock); | 1381 | |
1298 | return rc; | ||
1299 | } | 1382 | } |
1300 | 1383 | ||
1301 | int kvm_s390_inject_vm(struct kvm *kvm, | 1384 | int kvm_s390_inject_vm(struct kvm *kvm, |
@@ -1462,20 +1545,14 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1462 | return rc; | 1545 | return rc; |
1463 | } | 1546 | } |
1464 | 1547 | ||
1465 | void kvm_s390_clear_float_irqs(struct kvm *kvm) | 1548 | static inline void clear_irq_list(struct list_head *_list) |
1466 | { | 1549 | { |
1467 | struct kvm_s390_float_interrupt *fi; | 1550 | struct kvm_s390_interrupt_info *inti, *n; |
1468 | struct kvm_s390_interrupt_info *n, *inti = NULL; | ||
1469 | 1551 | ||
1470 | fi = &kvm->arch.float_int; | 1552 | list_for_each_entry_safe(inti, n, _list, list) { |
1471 | spin_lock(&fi->lock); | ||
1472 | list_for_each_entry_safe(inti, n, &fi->list, list) { | ||
1473 | list_del(&inti->list); | 1553 | list_del(&inti->list); |
1474 | kfree(inti); | 1554 | kfree(inti); |
1475 | } | 1555 | } |
1476 | fi->irq_count = 0; | ||
1477 | atomic_set(&fi->active, 0); | ||
1478 | spin_unlock(&fi->lock); | ||
1479 | } | 1556 | } |
1480 | 1557 | ||
1481 | static void inti_to_irq(struct kvm_s390_interrupt_info *inti, | 1558 | static void inti_to_irq(struct kvm_s390_interrupt_info *inti, |
@@ -1486,26 +1563,37 @@ static void inti_to_irq(struct kvm_s390_interrupt_info *inti, | |||
1486 | case KVM_S390_INT_PFAULT_INIT: | 1563 | case KVM_S390_INT_PFAULT_INIT: |
1487 | case KVM_S390_INT_PFAULT_DONE: | 1564 | case KVM_S390_INT_PFAULT_DONE: |
1488 | case KVM_S390_INT_VIRTIO: | 1565 | case KVM_S390_INT_VIRTIO: |
1489 | case KVM_S390_INT_SERVICE: | ||
1490 | irq->u.ext = inti->ext; | 1566 | irq->u.ext = inti->ext; |
1491 | break; | 1567 | break; |
1492 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 1568 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
1493 | irq->u.io = inti->io; | 1569 | irq->u.io = inti->io; |
1494 | break; | 1570 | break; |
1495 | case KVM_S390_MCHK: | ||
1496 | irq->u.mchk = inti->mchk; | ||
1497 | break; | ||
1498 | } | 1571 | } |
1499 | } | 1572 | } |
1500 | 1573 | ||
1574 | void kvm_s390_clear_float_irqs(struct kvm *kvm) | ||
1575 | { | ||
1576 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1577 | int i; | ||
1578 | |||
1579 | spin_lock(&fi->lock); | ||
1580 | for (i = 0; i < FIRQ_LIST_COUNT; i++) | ||
1581 | clear_irq_list(&fi->lists[i]); | ||
1582 | for (i = 0; i < FIRQ_MAX_COUNT; i++) | ||
1583 | fi->counters[i] = 0; | ||
1584 | spin_unlock(&fi->lock); | ||
1585 | }; | ||
1586 | |||
1501 | static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) | 1587 | static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) |
1502 | { | 1588 | { |
1503 | struct kvm_s390_interrupt_info *inti; | 1589 | struct kvm_s390_interrupt_info *inti; |
1504 | struct kvm_s390_float_interrupt *fi; | 1590 | struct kvm_s390_float_interrupt *fi; |
1505 | struct kvm_s390_irq *buf; | 1591 | struct kvm_s390_irq *buf; |
1592 | struct kvm_s390_irq *irq; | ||
1506 | int max_irqs; | 1593 | int max_irqs; |
1507 | int ret = 0; | 1594 | int ret = 0; |
1508 | int n = 0; | 1595 | int n = 0; |
1596 | int i; | ||
1509 | 1597 | ||
1510 | if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) | 1598 | if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) |
1511 | return -EINVAL; | 1599 | return -EINVAL; |
@@ -1523,15 +1611,41 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) | |||
1523 | 1611 | ||
1524 | fi = &kvm->arch.float_int; | 1612 | fi = &kvm->arch.float_int; |
1525 | spin_lock(&fi->lock); | 1613 | spin_lock(&fi->lock); |
1526 | list_for_each_entry(inti, &fi->list, list) { | 1614 | for (i = 0; i < FIRQ_LIST_COUNT; i++) { |
1615 | list_for_each_entry(inti, &fi->lists[i], list) { | ||
1616 | if (n == max_irqs) { | ||
1617 | /* signal userspace to try again */ | ||
1618 | ret = -ENOMEM; | ||
1619 | goto out; | ||
1620 | } | ||
1621 | inti_to_irq(inti, &buf[n]); | ||
1622 | n++; | ||
1623 | } | ||
1624 | } | ||
1625 | if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) { | ||
1527 | if (n == max_irqs) { | 1626 | if (n == max_irqs) { |
1528 | /* signal userspace to try again */ | 1627 | /* signal userspace to try again */ |
1529 | ret = -ENOMEM; | 1628 | ret = -ENOMEM; |
1530 | break; | 1629 | goto out; |
1531 | } | 1630 | } |
1532 | inti_to_irq(inti, &buf[n]); | 1631 | irq = (struct kvm_s390_irq *) &buf[n]; |
1632 | irq->type = KVM_S390_INT_SERVICE; | ||
1633 | irq->u.ext = fi->srv_signal; | ||
1533 | n++; | 1634 | n++; |
1534 | } | 1635 | } |
1636 | if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { | ||
1637 | if (n == max_irqs) { | ||
1638 | /* signal userspace to try again */ | ||
1639 | ret = -ENOMEM; | ||
1640 | goto out; | ||
1641 | } | ||
1642 | irq = (struct kvm_s390_irq *) &buf[n]; | ||
1643 | irq->type = KVM_S390_MCHK; | ||
1644 | irq->u.mchk = fi->mchk; | ||
1645 | n++; | ||
1646 | } | ||
1647 | |||
1648 | out: | ||
1535 | spin_unlock(&fi->lock); | 1649 | spin_unlock(&fi->lock); |
1536 | if (!ret && n > 0) { | 1650 | if (!ret && n > 0) { |
1537 | if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) | 1651 | if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index a1308859887d..dbc9ca34d9da 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
32 | #include <asm/nmi.h> | 32 | #include <asm/nmi.h> |
33 | #include <asm/switch_to.h> | 33 | #include <asm/switch_to.h> |
34 | #include <asm/isc.h> | ||
34 | #include <asm/sclp.h> | 35 | #include <asm/sclp.h> |
35 | #include "kvm-s390.h" | 36 | #include "kvm-s390.h" |
36 | #include "gaccess.h" | 37 | #include "gaccess.h" |
@@ -1069,7 +1070,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
1069 | goto out_err; | 1070 | goto out_err; |
1070 | 1071 | ||
1071 | spin_lock_init(&kvm->arch.float_int.lock); | 1072 | spin_lock_init(&kvm->arch.float_int.lock); |
1072 | INIT_LIST_HEAD(&kvm->arch.float_int.list); | 1073 | for (i = 0; i < FIRQ_LIST_COUNT; i++) |
1074 | INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); | ||
1073 | init_waitqueue_head(&kvm->arch.ipte_wq); | 1075 | init_waitqueue_head(&kvm->arch.ipte_wq); |
1074 | mutex_init(&kvm->arch.ipte_mutex); | 1076 | mutex_init(&kvm->arch.ipte_mutex); |
1075 | 1077 | ||
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c5aefef158e5..343644a59392 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -178,7 +178,7 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
178 | struct kvm_s390_irq *irq); | 178 | struct kvm_s390_irq *irq); |
179 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); | 179 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
180 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 180 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
181 | u64 cr6, u64 schid); | 181 | u64 isc_mask, u32 schid); |
182 | int kvm_s390_reinject_io_int(struct kvm *kvm, | 182 | int kvm_s390_reinject_io_int(struct kvm *kvm, |
183 | struct kvm_s390_interrupt_info *inti); | 183 | struct kvm_s390_interrupt_info *inti); |
184 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); | 184 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 5e4658d20c77..d22d8ee1ff9d 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -294,10 +294,13 @@ reinject_interrupt: | |||
294 | 294 | ||
295 | static int handle_tsch(struct kvm_vcpu *vcpu) | 295 | static int handle_tsch(struct kvm_vcpu *vcpu) |
296 | { | 296 | { |
297 | struct kvm_s390_interrupt_info *inti; | 297 | struct kvm_s390_interrupt_info *inti = NULL; |
298 | const u64 isc_mask = 0xffUL << 24; /* all iscs set */ | ||
298 | 299 | ||
299 | inti = kvm_s390_get_io_int(vcpu->kvm, 0, | 300 | /* a valid schid has at least one bit set */ |
300 | vcpu->run->s.regs.gprs[1]); | 301 | if (vcpu->run->s.regs.gprs[1]) |
302 | inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, | ||
303 | vcpu->run->s.regs.gprs[1]); | ||
301 | 304 | ||
302 | /* | 305 | /* |
303 | * Prepare exit to userspace. | 306 | * Prepare exit to userspace. |