Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r-- | arch/s390/kvm/interrupt.c | 592
1 file changed, 592 insertions, 0 deletions
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
new file mode 100644
index 000000000000..fcd1ed8015c1
--- /dev/null
+++ b/arch/s390/kvm/interrupt.c
@@ -0,0 +1,592 @@
/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <asm/lowcore.h>
#include <asm/uaccess.h>
#include <linux/kvm_host.h>
#include "kvm-s390.h"
#include "gaccess.h"

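/*
 * Helpers that test the guest PSW mask: external interrupts can only be
 * delivered while the external mask bit is set, and a PSW with the PER,
 * I/O and external classes all masked marks a disabled wait the guest
 * cannot leave on its own.
 */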
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

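/*
 * Check whether a pending interrupt may be delivered to this vcpu right
 * now.  External interrupts additionally require the matching
 * subclass-mask bit in guest control register 0 (0x4000ul for emergency
 * signals, 0x200ul for service-signal based interrupts).
 */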
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

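/*
 * An interrupt that cannot be delivered yet is turned into an intercept
 * request instead: either a cpuflag that forces an exit from SIE, or a
 * request to intercept guest loads of control register 0 so delivery can
 * be re-evaluated once the guest changes its subclass masks.
 */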
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}

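/*
 * Deliver one interrupt to the guest: store the interruption code and
 * parameters into the guest lowcore, save the current PSW as the old PSW
 * and load the new PSW of that interruption class.  table[] maps the
 * instruction-length field in the top two bits of the ipa to the
 * instruction length code in bytes used for program interruptions.
 */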
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		vcpu->arch.sie_block->prefix = inti->prefix.address;
		vcpu->arch.sie_block->ihcpu = 0xffff;
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
			restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
			table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}

	if (exception) {
		VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
			   " interrupt");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (inti->type == KVM_S390_PROGRAM_INT) {
			printk(KERN_WARNING "kvm: recursive program check\n");
			BUG();
		}
	}
}

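/*
 * Deliver a clock comparator interrupt (external interruption code
 * 0x1004) if external interrupts are enabled and the clock-comparator
 * subclass-mask bit (0x800ul) is set in guest control register 0.
 * Returns 1 on delivery, 0 otherwise.
 */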
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		__LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;

	if (exception) {
		VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
			   " ckc interrupt");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		return 0;
	}

	return 1;
}

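/*
 * Check whether any deliverable interrupt is pending for this vcpu:
 * scan the per-vcpu local list, then the per-vm floating list, and
 * finally check whether the guest clock comparator has already expired.
 */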
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct local_interrupt *li = &vcpu->arch.local_int;
	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock_bh(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

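/*
 * Handle an enabled wait state.  If the clock comparator is armed and
 * enabled, arm a host timer for the remaining time; 0xf4240000 is one
 * second in TOD-clock units (1000000 << 12), so the division converts
 * the remaining TOD ticks into jiffies.  Then sleep until an interrupt
 * becomes pending, the timer fires or a signal arrives.
 */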
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -ENOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;

	vcpu->arch.ckc_timer.expires = jiffies + sltime;

	add_timer(&vcpu->arch.ckc_timer);
	VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
no_timer:
	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	__set_cpu_idle(vcpu);
	vcpu->arch.local_int.timer_due = 0;
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
		spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
	del_timer(&vcpu->arch.ckc_timer);
	return 0;
}

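/*
 * Timer callback for the clock comparator timer armed above: mark the
 * timer as due and wake up the sleeping vcpu thread.
 */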
void kvm_s390_idle_wakeup(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
}

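/*
 * Deliver everything that is currently deliverable from the local and
 * floating interrupt lists, and set intercept indicators for interrupts
 * that have to stay pending.  An expired clock comparator is delivered
 * in between, after the local list has been drained.
 */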
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct local_interrupt *li = &vcpu->arch.local_int;
	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock_bh(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

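/*
 * Queue a program interruption on the local interrupt list.  This is the
 * in-kernel injection path; the BUG_ON documents that the target vcpu is
 * not expected to be sleeping on its wait queue when this is called.
 */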
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct local_interrupt *li = &vcpu->arch.local_int;
	struct interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

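/*
 * Inject a floating (per-vm) interrupt.  The interrupt is queued on the
 * floating list and one vcpu is chosen to take it: the first idle vcpu
 * from the idle mask if there is one, otherwise the next online vcpu in
 * round-robin order.
 */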
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct local_interrupt *li;
	struct float_interrupt *fi;
	struct interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock_bh(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock_bh(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}

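/*
 * Inject an interrupt targeted at a single vcpu.  Program interruptions
 * are added to the head of the local list so they are delivered before
 * anything already pending; all other types are appended to the tail.
 */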
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct local_interrupt *li;
	struct interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}