author    Carsten Otte <cotte@de.ibm.com>    2008-03-25 13:47:26 -0400
committer Avi Kivity <avi@qumranet.com>      2008-04-27 05:00:44 -0400
commit    ba5c1e9b6ceebdc39343cc03eb39f077abd3c571 (patch)
tree      ab07e763ad7d9aad2ef189def5537e73a50c7503
parent    8f2abe6a1e525e878bdf58f68ccd146d543fde84 (diff)
KVM: s390: interrupt subsystem, cpu timer, waitpsw
This patch contains the s390 interrupt subsystem (similar to the in-kernel apic), including timer interrupts (similar to the in-kernel pit) and enabled wait (similar to the in-kernel hlt). In order to achieve that, this patch also introduces intercept handling for instruction intercepts, and it implements the load control instructions.

This patch introduces an ioctl KVM_S390_INTERRUPT which is valid for both the vm file descriptor and the vcpu file descriptors. In case this ioctl is issued against a vm file descriptor, the interrupt is considered floating. Floating interrupts may be delivered to any virtual cpu in the configuration.

The following interrupts are supported:
SIGP STOP       - interprocessor signal that stops a remote cpu
SIGP SET PREFIX - interprocessor signal that sets the prefix register of a (stopped) remote cpu
INT EMERGENCY   - interprocessor interrupt, usually used to signal need_resched and for smp_call_function() in the guest
PROGRAM INT     - exception during program execution such as a page fault, an illegal instruction and friends
RESTART         - interprocessor signal that starts a stopped cpu
INT VIRTIO      - floating interrupt for virtio notifications
INT SERVICE     - floating interrupt for notifications from the system service processor

struct kvm_s390_interrupt, which is submitted as the ioctl parameter when injecting an interrupt, also carries parameter data for the interrupt along with the interrupt type. Interrupts on s390 usually have a state that represents the current operation, or identifies which device has caused the interruption.

kvm_s390_handle_wait() handles waitpsw in two flavors: in case of a disabled wait (that is, disabled for interrupts), we exit to userspace. In case of an enabled wait we set up a timer that fires when the cpu clock comparator value is reached and sleep on a wait queue.

[christian: change virtio interrupt to 0x2603]

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
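To illustrate how the new interface is meant to be driven from userspace, here is a minimal sketch (not part of the patch): it assumes a vm file descriptor and a vcpu file descriptor have already been obtained through the usual KVM_CREATE_VM / KVM_CREATE_VCPU calls, and the helper names are made up for the example. The struct kvm_s390_interrupt layout and the type constants are taken from the include/linux/kvm.h hunk below.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Inject a floating virtio interrupt. Issued against the vm fd, so it may be
   delivered to any virtual cpu in the configuration; parm and parm64 carry
   the virtio notification parameters. */
static int inject_virtio(int vm_fd, uint32_t parm, uint64_t parm64)
{
	struct kvm_s390_interrupt irq = {
		.type   = KVM_S390_INT_VIRTIO,
		.parm   = parm,
		.parm64 = parm64,
	};

	return ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
}

/* Inject a program check into a single vcpu. Issued against the vcpu fd;
   for KVM_S390_PROGRAM_INT the parm field holds the program interruption
   code (for example 0x05, addressing exception). */
static int inject_program_check(int vcpu_fd, uint16_t code)
{
	struct kvm_s390_interrupt irq = {
		.type = KVM_S390_PROGRAM_INT,
		.parm = code,
	};

	return ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq);
}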
-rw-r--r--  arch/s390/kvm/Makefile       |   2
-rw-r--r--  arch/s390/kvm/intercept.c    | 116
-rw-r--r--  arch/s390/kvm/interrupt.c    | 587
-rw-r--r--  arch/s390/kvm/kvm-s390.c     |  48
-rw-r--r--  arch/s390/kvm/kvm-s390.h     |  15
-rw-r--r--  include/asm-s390/kvm_host.h  |  75
-rw-r--r--  include/linux/kvm.h          |  17
7 files changed, 857 insertions(+), 3 deletions(-)
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 27882b35ef04..7275a1aa4ee4 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -10,5 +10,5 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)

EXTRA_CFLAGS += -Ivirt/kvm -Iarch/s390/kvm

-kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o
+kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index e757230b982c..7f7347b5f34a 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -18,6 +18,86 @@
#include <asm/kvm_host.h>

#include "kvm-s390.h"
21#include "gaccess.h"
22
23static int handle_lctg(struct kvm_vcpu *vcpu)
24{
25 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
26 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
27 int base2 = vcpu->arch.sie_block->ipb >> 28;
28 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
29 ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
30 u64 useraddr;
31 int reg, rc;
32
33 vcpu->stat.instruction_lctg++;
34 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
35 return -ENOTSUPP;
36
37 useraddr = disp2;
38 if (base2)
39 useraddr += vcpu->arch.guest_gprs[base2];
40
41 reg = reg1;
42
43 VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
44 disp2);
45
46 do {
47 rc = get_guest_u64(vcpu, useraddr,
48 &vcpu->arch.sie_block->gcr[reg]);
49 if (rc == -EFAULT) {
50 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
51 break;
52 }
53 useraddr += 8;
54 if (reg == reg3)
55 break;
56 reg = (reg + 1) % 16;
57 } while (1);
58 return 0;
59}
60
61static int handle_lctl(struct kvm_vcpu *vcpu)
62{
63 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
64 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
65 int base2 = vcpu->arch.sie_block->ipb >> 28;
66 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
67 u64 useraddr;
68 u32 val = 0;
69 int reg, rc;
70
71 vcpu->stat.instruction_lctl++;
72
73 useraddr = disp2;
74 if (base2)
75 useraddr += vcpu->arch.guest_gprs[base2];
76
77 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
78 disp2);
79
80 reg = reg1;
81 do {
82 rc = get_guest_u32(vcpu, useraddr, &val);
83 if (rc == -EFAULT) {
84 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
85 break;
86 }
87 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
88 vcpu->arch.sie_block->gcr[reg] |= val;
89 useraddr += 4;
90 if (reg == reg3)
91 break;
92 reg = (reg + 1) % 16;
93 } while (1);
94 return 0;
95}
96
97static intercept_handler_t instruction_handlers[256] = {
98 [0xb7] = handle_lctl,
99 [0xeb] = handle_lctg,
100};

static int handle_noop(struct kvm_vcpu *vcpu)
{
@@ -58,10 +138,46 @@ static int handle_validity(struct kvm_vcpu *vcpu)
	return -ENOTSUPP;
}

141static int handle_instruction(struct kvm_vcpu *vcpu)
142{
143 intercept_handler_t handler;
144
145 vcpu->stat.exit_instruction++;
146 handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
147 if (handler)
148 return handler(vcpu);
149 return -ENOTSUPP;
150}
151
152static int handle_prog(struct kvm_vcpu *vcpu)
153{
154 vcpu->stat.exit_program_interruption++;
155 return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
156}
157
158static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
159{
160 int rc, rc2;
161
162 vcpu->stat.exit_instr_and_program++;
163 rc = handle_instruction(vcpu);
164 rc2 = handle_prog(vcpu);
165
166 if (rc == -ENOTSUPP)
167 vcpu->arch.sie_block->icptcode = 0x04;
168 if (rc)
169 return rc;
170 return rc2;
171}
172
static const intercept_handler_t intercept_funcs[0x48 >> 2] = {
	[0x00 >> 2] = handle_noop,
175 [0x04 >> 2] = handle_instruction,
176 [0x08 >> 2] = handle_prog,
177 [0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_noop,
180 [0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
};
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
new file mode 100644
index 000000000000..f62588cb75f8
--- /dev/null
+++ b/arch/s390/kvm/interrupt.c
@@ -0,0 +1,587 @@
1/*
2 * interrupt.c - handling kvm guest interrupts
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
13#include <asm/lowcore.h>
14#include <asm/uaccess.h>
15#include <linux/kvm_host.h>
16#include "kvm-s390.h"
17#include "gaccess.h"
18
19static int psw_extint_disabled(struct kvm_vcpu *vcpu)
20{
21 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
22}
23
24static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
25{
26 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
27 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
28 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
29 return 0;
30 return 1;
31}
32
33static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
34 struct interrupt_info *inti)
35{
36 switch (inti->type) {
37 case KVM_S390_INT_EMERGENCY:
38 if (psw_extint_disabled(vcpu))
39 return 0;
40 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
41 return 1;
42 return 0;
43 case KVM_S390_INT_SERVICE:
44 if (psw_extint_disabled(vcpu))
45 return 0;
46 if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
47 return 1;
48 return 0;
49 case KVM_S390_INT_VIRTIO:
50 if (psw_extint_disabled(vcpu))
51 return 0;
52 if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
53 return 1;
54 return 0;
55 case KVM_S390_PROGRAM_INT:
56 case KVM_S390_SIGP_STOP:
57 case KVM_S390_SIGP_SET_PREFIX:
58 case KVM_S390_RESTART:
59 return 1;
60 default:
61 BUG();
62 }
63 return 0;
64}
65
66static void __set_cpu_idle(struct kvm_vcpu *vcpu)
67{
68 BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
69 atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
70 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
71}
72
73static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
74{
75 BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
76 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
77 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
78}
79
80static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
81{
82 atomic_clear_mask(CPUSTAT_ECALL_PEND |
83 CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
84 &vcpu->arch.sie_block->cpuflags);
85 vcpu->arch.sie_block->lctl = 0x0000;
86}
87
88static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
89{
90 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
91}
92
93static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
94 struct interrupt_info *inti)
95{
96 switch (inti->type) {
97 case KVM_S390_INT_EMERGENCY:
98 case KVM_S390_INT_SERVICE:
99 case KVM_S390_INT_VIRTIO:
100 if (psw_extint_disabled(vcpu))
101 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
102 else
103 vcpu->arch.sie_block->lctl |= LCTL_CR0;
104 break;
105 case KVM_S390_SIGP_STOP:
106 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
107 break;
108 default:
109 BUG();
110 }
111}
112
113static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
114 struct interrupt_info *inti)
115{
116 const unsigned short table[] = { 2, 4, 4, 6 };
117 int rc, exception = 0;
118
119 switch (inti->type) {
120 case KVM_S390_INT_EMERGENCY:
121 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
122 vcpu->stat.deliver_emergency_signal++;
123 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
124 if (rc == -EFAULT)
125 exception = 1;
126
127 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
128 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
129 if (rc == -EFAULT)
130 exception = 1;
131
132 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
133 __LC_EXT_NEW_PSW, sizeof(psw_t));
134 if (rc == -EFAULT)
135 exception = 1;
136 break;
137
138 case KVM_S390_INT_SERVICE:
139 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
140 inti->ext.ext_params);
141 vcpu->stat.deliver_service_signal++;
142 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
143 if (rc == -EFAULT)
144 exception = 1;
145
146 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
147 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
148 if (rc == -EFAULT)
149 exception = 1;
150
151 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
152 __LC_EXT_NEW_PSW, sizeof(psw_t));
153 if (rc == -EFAULT)
154 exception = 1;
155
156 rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
157 if (rc == -EFAULT)
158 exception = 1;
159 break;
160
161 case KVM_S390_INT_VIRTIO:
162 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
163 inti->ext.ext_params, inti->ext.ext_params2);
164 vcpu->stat.deliver_virtio_interrupt++;
165 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
166 if (rc == -EFAULT)
167 exception = 1;
168
169 rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
170 if (rc == -EFAULT)
171 exception = 1;
172
173 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
174 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
175 if (rc == -EFAULT)
176 exception = 1;
177
178 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
179 __LC_EXT_NEW_PSW, sizeof(psw_t));
180 if (rc == -EFAULT)
181 exception = 1;
182
183 rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
184 if (rc == -EFAULT)
185 exception = 1;
186
187 rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
188 inti->ext.ext_params2);
189 if (rc == -EFAULT)
190 exception = 1;
191 break;
192
193 case KVM_S390_SIGP_STOP:
194 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
195 vcpu->stat.deliver_stop_signal++;
196 __set_intercept_indicator(vcpu, inti);
197 break;
198
199 case KVM_S390_SIGP_SET_PREFIX:
200 VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
201 inti->prefix.address);
202 vcpu->stat.deliver_prefix_signal++;
203 vcpu->arch.sie_block->prefix = inti->prefix.address;
204 vcpu->arch.sie_block->ihcpu = 0xffff;
205 break;
206
207 case KVM_S390_RESTART:
208 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
209 vcpu->stat.deliver_restart_signal++;
210 rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
211 restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
212 if (rc == -EFAULT)
213 exception = 1;
214
215 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
216 offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
217 if (rc == -EFAULT)
218 exception = 1;
219 break;
220
221 case KVM_S390_PROGRAM_INT:
222 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
223 inti->pgm.code,
224 table[vcpu->arch.sie_block->ipa >> 14]);
225 vcpu->stat.deliver_program_int++;
226 rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
227 if (rc == -EFAULT)
228 exception = 1;
229
230 rc = put_guest_u16(vcpu, __LC_PGM_ILC,
231 table[vcpu->arch.sie_block->ipa >> 14]);
232 if (rc == -EFAULT)
233 exception = 1;
234
235 rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
236 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
237 if (rc == -EFAULT)
238 exception = 1;
239
240 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
241 __LC_PGM_NEW_PSW, sizeof(psw_t));
242 if (rc == -EFAULT)
243 exception = 1;
244 break;
245
246 default:
247 BUG();
248 }
249
250 if (exception) {
251 VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
252 " interrupt");
253 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
254 if (inti->type == KVM_S390_PROGRAM_INT) {
255 printk(KERN_WARNING "kvm: recursive program check\n");
256 BUG();
257 }
258 }
259}
260
261static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
262{
263 int rc, exception = 0;
264
265 if (psw_extint_disabled(vcpu))
266 return 0;
267 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
268 return 0;
269 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
270 if (rc == -EFAULT)
271 exception = 1;
272 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
273 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
274 if (rc == -EFAULT)
275 exception = 1;
276 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
277 __LC_EXT_NEW_PSW, sizeof(psw_t));
278 if (rc == -EFAULT)
279 exception = 1;
280
281 if (exception) {
282 VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
283 " ckc interrupt");
284 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
285 return 0;
286 }
287
288 return 1;
289}
290
291int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
292{
293 struct local_interrupt *li = &vcpu->arch.local_int;
294 struct float_interrupt *fi = vcpu->arch.local_int.float_int;
295 struct interrupt_info *inti;
296 int rc = 0;
297
298 if (atomic_read(&li->active)) {
299 spin_lock_bh(&li->lock);
300 list_for_each_entry(inti, &li->list, list)
301 if (__interrupt_is_deliverable(vcpu, inti)) {
302 rc = 1;
303 break;
304 }
305 spin_unlock_bh(&li->lock);
306 }
307
308 if ((!rc) && atomic_read(&fi->active)) {
309 spin_lock_bh(&fi->lock);
310 list_for_each_entry(inti, &fi->list, list)
311 if (__interrupt_is_deliverable(vcpu, inti)) {
312 rc = 1;
313 break;
314 }
315 spin_unlock_bh(&fi->lock);
316 }
317
318 if ((!rc) && (vcpu->arch.sie_block->ckc <
319 get_clock() + vcpu->arch.sie_block->epoch)) {
320 if ((!psw_extint_disabled(vcpu)) &&
321 (vcpu->arch.sie_block->gcr[0] & 0x800ul))
322 rc = 1;
323 }
324
325 return rc;
326}
327
328int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
329{
330 u64 now, sltime;
331 DECLARE_WAITQUEUE(wait, current);
332
333 vcpu->stat.exit_wait_state++;
334 if (kvm_cpu_has_interrupt(vcpu))
335 return 0;
336
337 if (psw_interrupts_disabled(vcpu)) {
338 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
339 __unset_cpu_idle(vcpu);
340 return -ENOTSUPP; /* disabled wait */
341 }
342
343 if (psw_extint_disabled(vcpu) ||
344 (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
345 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
346 goto no_timer;
347 }
348
349 now = get_clock() + vcpu->arch.sie_block->epoch;
350 if (vcpu->arch.sie_block->ckc < now) {
351 __unset_cpu_idle(vcpu);
352 return 0;
353 }
354
355 sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
356
357 vcpu->arch.ckc_timer.expires = jiffies + sltime;
358
359 add_timer(&vcpu->arch.ckc_timer);
360 VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
361no_timer:
362 spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
363 spin_lock_bh(&vcpu->arch.local_int.lock);
364 __set_cpu_idle(vcpu);
365 vcpu->arch.local_int.timer_due = 0;
366 add_wait_queue(&vcpu->arch.local_int.wq, &wait);
367 while (list_empty(&vcpu->arch.local_int.list) &&
368 list_empty(&vcpu->arch.local_int.float_int->list) &&
369 (!vcpu->arch.local_int.timer_due) &&
370 !signal_pending(current)) {
371 set_current_state(TASK_INTERRUPTIBLE);
372 spin_unlock_bh(&vcpu->arch.local_int.lock);
373 spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
374 vcpu_put(vcpu);
375 schedule();
376 vcpu_load(vcpu);
377 spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
378 spin_lock_bh(&vcpu->arch.local_int.lock);
379 }
380 __unset_cpu_idle(vcpu);
381 __set_current_state(TASK_RUNNING);
382 remove_wait_queue(&vcpu->wq, &wait);
383 spin_unlock_bh(&vcpu->arch.local_int.lock);
384 spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
385 del_timer(&vcpu->arch.ckc_timer);
386 return 0;
387}
388
389void kvm_s390_idle_wakeup(unsigned long data)
390{
391 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
392
393 spin_lock_bh(&vcpu->arch.local_int.lock);
394 vcpu->arch.local_int.timer_due = 1;
395 if (waitqueue_active(&vcpu->arch.local_int.wq))
396 wake_up_interruptible(&vcpu->arch.local_int.wq);
397 spin_unlock_bh(&vcpu->arch.local_int.lock);
398}
399
400
401void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
402{
403 struct local_interrupt *li = &vcpu->arch.local_int;
404 struct float_interrupt *fi = vcpu->arch.local_int.float_int;
405 struct interrupt_info *n, *inti = NULL;
406 int deliver;
407
408 __reset_intercept_indicators(vcpu);
409 if (atomic_read(&li->active)) {
410 do {
411 deliver = 0;
412 spin_lock_bh(&li->lock);
413 list_for_each_entry_safe(inti, n, &li->list, list) {
414 if (__interrupt_is_deliverable(vcpu, inti)) {
415 list_del(&inti->list);
416 deliver = 1;
417 break;
418 }
419 __set_intercept_indicator(vcpu, inti);
420 }
421 if (list_empty(&li->list))
422 atomic_set(&li->active, 0);
423 spin_unlock_bh(&li->lock);
424 if (deliver) {
425 __do_deliver_interrupt(vcpu, inti);
426 kfree(inti);
427 }
428 } while (deliver);
429 }
430
431 if ((vcpu->arch.sie_block->ckc <
432 get_clock() + vcpu->arch.sie_block->epoch))
433 __try_deliver_ckc_interrupt(vcpu);
434
435 if (atomic_read(&fi->active)) {
436 do {
437 deliver = 0;
438 spin_lock_bh(&fi->lock);
439 list_for_each_entry_safe(inti, n, &fi->list, list) {
440 if (__interrupt_is_deliverable(vcpu, inti)) {
441 list_del(&inti->list);
442 deliver = 1;
443 break;
444 }
445 __set_intercept_indicator(vcpu, inti);
446 }
447 if (list_empty(&fi->list))
448 atomic_set(&fi->active, 0);
449 spin_unlock_bh(&fi->lock);
450 if (deliver) {
451 __do_deliver_interrupt(vcpu, inti);
452 kfree(inti);
453 }
454 } while (deliver);
455 }
456}
457
458int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
459{
460 struct local_interrupt *li = &vcpu->arch.local_int;
461 struct interrupt_info *inti;
462
463 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
464 if (!inti)
465 return -ENOMEM;
466
467	inti->type = KVM_S390_PROGRAM_INT;
468 inti->pgm.code = code;
469
470 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
471 spin_lock_bh(&li->lock);
472 list_add(&inti->list, &li->list);
473 atomic_set(&li->active, 1);
474 BUG_ON(waitqueue_active(&li->wq));
475 spin_unlock_bh(&li->lock);
476 return 0;
477}
478
479int kvm_s390_inject_vm(struct kvm *kvm,
480 struct kvm_s390_interrupt *s390int)
481{
482 struct local_interrupt *li;
483 struct float_interrupt *fi;
484 struct interrupt_info *inti;
485 int sigcpu;
486
487 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
488 if (!inti)
489 return -ENOMEM;
490
491 switch (s390int->type) {
492 case KVM_S390_INT_VIRTIO:
493 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
494 s390int->parm, s390int->parm64);
495 inti->type = s390int->type;
496 inti->ext.ext_params = s390int->parm;
497 inti->ext.ext_params2 = s390int->parm64;
498 break;
499 case KVM_S390_INT_SERVICE:
500 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
501 inti->type = s390int->type;
502 inti->ext.ext_params = s390int->parm;
503 break;
504 case KVM_S390_PROGRAM_INT:
505 case KVM_S390_SIGP_STOP:
506 case KVM_S390_INT_EMERGENCY:
507 default:
508 kfree(inti);
509 return -EINVAL;
510 }
511
512 mutex_lock(&kvm->lock);
513 fi = &kvm->arch.float_int;
514 spin_lock_bh(&fi->lock);
515 list_add_tail(&inti->list, &fi->list);
516 atomic_set(&fi->active, 1);
517 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
518 if (sigcpu == KVM_MAX_VCPUS) {
519 do {
520 sigcpu = fi->next_rr_cpu++;
521 if (sigcpu == KVM_MAX_VCPUS)
522 sigcpu = fi->next_rr_cpu = 0;
523 } while (fi->local_int[sigcpu] == NULL);
524 }
525 li = fi->local_int[sigcpu];
526 spin_lock_bh(&li->lock);
527 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
528 if (waitqueue_active(&li->wq))
529 wake_up_interruptible(&li->wq);
530 spin_unlock_bh(&li->lock);
531 spin_unlock_bh(&fi->lock);
532 mutex_unlock(&kvm->lock);
533 return 0;
534}
535
536int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
537 struct kvm_s390_interrupt *s390int)
538{
539 struct local_interrupt *li;
540 struct interrupt_info *inti;
541
542 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
543 if (!inti)
544 return -ENOMEM;
545
546 switch (s390int->type) {
547 case KVM_S390_PROGRAM_INT:
548 if (s390int->parm & 0xffff0000) {
549 kfree(inti);
550 return -EINVAL;
551 }
552 inti->type = s390int->type;
553 inti->pgm.code = s390int->parm;
554 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
555 s390int->parm);
556 break;
557 case KVM_S390_SIGP_STOP:
558 case KVM_S390_RESTART:
559 case KVM_S390_SIGP_SET_PREFIX:
560 case KVM_S390_INT_EMERGENCY:
561 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
562 inti->type = s390int->type;
563 break;
564 case KVM_S390_INT_VIRTIO:
565 case KVM_S390_INT_SERVICE:
566 default:
567 kfree(inti);
568 return -EINVAL;
569 }
570
571 mutex_lock(&vcpu->kvm->lock);
572 li = &vcpu->arch.local_int;
573 spin_lock_bh(&li->lock);
574 if (inti->type == KVM_S390_PROGRAM_INT)
575 list_add(&inti->list, &li->list);
576 else
577 list_add_tail(&inti->list, &li->list);
578 atomic_set(&li->active, 1);
579 if (inti->type == KVM_S390_SIGP_STOP)
580 li->action_bits |= ACTION_STOP_ON_STOP;
581 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
582 if (waitqueue_active(&li->wq))
583 wake_up_interruptible(&vcpu->arch.local_int.wq);
584 spin_unlock_bh(&li->lock);
585 mutex_unlock(&vcpu->kvm->lock);
586 return 0;
587}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index a906499214bb..5e3473c9a639 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
23#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

@@ -34,6 +35,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
38 { "exit_instruction", VCPU_STAT(exit_instruction) },
39 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
40 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
41 { "instruction_lctg", VCPU_STAT(instruction_lctg) },
42 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
43 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
44 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
45 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
46 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
47 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
48 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
49 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
50 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ NULL }
};

@@ -106,6 +120,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
	int r;

	switch (ioctl) {
123 case KVM_S390_INTERRUPT: {
124 struct kvm_s390_interrupt s390int;
125
126 r = -EFAULT;
127 if (copy_from_user(&s390int, argp, sizeof(s390int)))
128 break;
129 r = kvm_s390_inject_vm(kvm, &s390int);
130 break;
131 }
	default:
		r = -EINVAL;
	}
@@ -138,6 +161,9 @@ struct kvm *kvm_arch_create_vm(void)
	if (!kvm->arch.dbf)
		goto out_nodbf;

164 spin_lock_init(&kvm->arch.float_int.lock);
165 INIT_LIST_HEAD(&kvm->arch.float_int.list);
166
	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

@@ -218,7 +244,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	vcpu->arch.sie_block->gmsor = 0x000000000000;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
-
+	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
+		    (unsigned long) vcpu);
	return 0;
}

@@ -243,6 +270,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

273 spin_lock_init(&vcpu->arch.local_int.lock);
274 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
275 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
276 spin_lock_bh(&kvm->arch.float_int.lock);
277 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
278 init_waitqueue_head(&vcpu->arch.local_int.wq);
279 spin_unlock_bh(&kvm->arch.float_int.lock);
280
	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
@@ -395,6 +430,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

433 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
434
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
@@ -410,8 +447,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	might_sleep();

	do {
450 kvm_s390_deliver_pending_interrupts(vcpu);
		__vcpu_run(vcpu);
-
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

@@ -538,6 +575,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
578 case KVM_S390_INTERRUPT: {
579 struct kvm_s390_interrupt s390int;
580
581 if (copy_from_user(&s390int, argp, sizeof(s390int)))
582 return -EFAULT;
583 return kvm_s390_inject_vcpu(vcpu, &s390int);
584 }
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 5b82527b7f86..8df745bc08db 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -14,6 +14,7 @@
#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

17#include <linux/kvm.h>
#include <linux/kvm_host.h>

typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
@@ -33,4 +34,18 @@ do { \
	d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	d_args); \
} while (0)
37
38static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
39{
40 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
41}
42
43int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
44void kvm_s390_idle_wakeup(unsigned long data);
45void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
46int kvm_s390_inject_vm(struct kvm *kvm,
47 struct kvm_s390_interrupt *s390int);
48int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
49 struct kvm_s390_interrupt *s390int);
50int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
#endif
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
index 8965b38d0a32..4fe19305888a 100644
--- a/include/asm-s390/kvm_host.h
+++ b/include/asm-s390/kvm_host.h
@@ -70,6 +70,7 @@ struct sie_block {
	__u64 ckc; /* 0x0030 */
	__u64 epoch; /* 0x0038 */
	__u8 reserved40[4]; /* 0x0040 */
73#define LCTL_CR0 0x8000
	__u16 lctl; /* 0x0044 */
	__s16 icpua; /* 0x0046 */
	__u32 ictl; /* 0x0048 */
@@ -105,8 +106,79 @@ struct kvm_vcpu_stat {
	u32 exit_external_interrupt;
	u32 exit_stop_request;
	u32 exit_validity;
109 u32 exit_instruction;
110 u32 instruction_lctl;
111 u32 instruction_lctg;
112 u32 exit_program_interruption;
113 u32 exit_instr_and_program;
114 u32 deliver_emergency_signal;
115 u32 deliver_service_signal;
116 u32 deliver_virtio_interrupt;
117 u32 deliver_stop_signal;
118 u32 deliver_prefix_signal;
119 u32 deliver_restart_signal;
120 u32 deliver_program_int;
121 u32 exit_wait_state;
};

124struct io_info {
125 __u16 subchannel_id; /* 0x0b8 */
126 __u16 subchannel_nr; /* 0x0ba */
127 __u32 io_int_parm; /* 0x0bc */
128 __u32 io_int_word; /* 0x0c0 */
129};
130
131struct ext_info {
132 __u32 ext_params;
133 __u64 ext_params2;
134};
135
136#define PGM_OPERATION 0x01
137#define PGM_PRIVILEGED_OPERATION 0x02
138#define PGM_EXECUTE 0x03
139#define PGM_PROTECTION 0x04
140#define PGM_ADDRESSING 0x05
141#define PGM_SPECIFICATION 0x06
142#define PGM_DATA 0x07
143
144struct pgm_info {
145 __u16 code;
146};
147
148struct prefix_info {
149 __u32 address;
150};
151
152struct interrupt_info {
153 struct list_head list;
154 u64 type;
155 union {
156 struct io_info io;
157 struct ext_info ext;
158 struct pgm_info pgm;
159 struct prefix_info prefix;
160 };
161};
162
163struct local_interrupt {
164 spinlock_t lock;
165 struct list_head list;
166 atomic_t active;
167 struct float_interrupt *float_int;
168 int timer_due; /* event indicator for waitqueue below */
169 wait_queue_head_t wq;
170};
171
172struct float_interrupt {
173 spinlock_t lock;
174 struct list_head list;
175 atomic_t active;
176 int next_rr_cpu;
177 unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
178 struct local_interrupt *local_int[64];
179};
180
181
struct kvm_vcpu_arch {
	struct sie_block *sie_block;
	unsigned long guest_gprs[16];
@@ -114,6 +186,8 @@ struct kvm_vcpu_arch {
	unsigned int host_acrs[NUM_ACRS];
	s390_fp_regs guest_fpregs;
	unsigned int guest_acrs[NUM_ACRS];
189 struct local_interrupt local_int;
190 struct timer_list ckc_timer;
};

struct kvm_vm_stat {
@@ -125,6 +199,7 @@ struct kvm_arch{
	unsigned long guest_memsize;
	struct sca_block *sca;
	debug_info_t *dbf;
202 struct float_interrupt float_int;
};

extern int sie64a(struct sie_block *, __u64 *);
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index f2acd6b9ab4d..029f0284a2fd 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -219,6 +219,21 @@ struct kvm_s390_psw {
	__u64 addr;
};

222/* valid values for type in kvm_s390_interrupt */
223#define KVM_S390_SIGP_STOP 0xfffe0000u
224#define KVM_S390_PROGRAM_INT 0xfffe0001u
225#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
226#define KVM_S390_RESTART 0xfffe0003u
227#define KVM_S390_INT_VIRTIO 0xffff2603u
228#define KVM_S390_INT_SERVICE 0xffff2401u
229#define KVM_S390_INT_EMERGENCY 0xffff1201u
230
231struct kvm_s390_interrupt {
232 __u32 type;
233 __u32 parm;
234 __u64 parm64;
235};
236
#define KVMIO 0xAE

/*
@@ -307,6 +322,8 @@ struct kvm_s390_psw {
#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
/* Available with KVM_CAP_VAPIC */
#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
325/* valid for virtual machine (for floating interrupt)_and_ vcpu */
326#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt)
/* store status for s390 */
#define KVM_S390_STORE_STATUS_NOADDR (-1ul)
#define KVM_S390_STORE_STATUS_PREFIXED (-2ul)
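A note on the clock comparator arithmetic in kvm_s390_handle_wait() above, offered as a hedged aside rather than as part of the patch: the s390 TOD clock advances by 4096 units per microsecond (bit 51 ticks once per microsecond), so 0xf4240000 TOD units correspond to one second and 0xf4240000 / HZ to one jiffy. The sketch below restates that conversion under this assumption; the helper name and the HZ value are illustrative only.

#define HZ 100	/* assumed tick rate, for illustration only */

typedef unsigned long long u64;

/* Convert a TOD-clock delta (clock comparator minus current TOD value) into
   a relative timeout in jiffies, mirroring the expression used by
   kvm_s390_handle_wait(); the +1 makes sure the timer never fires early. */
static unsigned long tod_delta_to_jiffies(u64 tod_delta)
{
	return tod_delta / (0xf4240000ULL / HZ) + 1;
}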