Diffstat (limited to 'arch/s390/kvm/sigp.c')
 arch/s390/kvm/sigp.c | 305 ++++++++++++++++++++++++-------------------------
 1 file changed, 148 insertions(+), 157 deletions(-)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index cf243ba3d50f..6651f9f73973 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -20,20 +20,13 @@
 #include "kvm-s390.h"
 #include "trace.h"
 
-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 			u64 *reg)
 {
 	struct kvm_s390_local_interrupt *li;
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int cpuflags;
 	int rc;
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
 
 	cpuflags = atomic_read(li->cpuflags);
@@ -48,55 +41,53 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 		rc = SIGP_CC_STATUS_STORED;
 	}
 
-	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
+	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
+		   rc);
 	return rc;
 }
 
-static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
+				   struct kvm_vcpu *dst_vcpu)
 {
-	struct kvm_s390_interrupt s390int = {
+	struct kvm_s390_irq irq = {
 		.type = KVM_S390_INT_EMERGENCY,
-		.parm = vcpu->vcpu_id,
+		.u.emerg.code = vcpu->vcpu_id,
 	};
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc = 0;
 
-	if (cpu_addr < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
 	if (!rc)
-		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
+		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
+			   dst_vcpu->vcpu_id);
 
 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
+static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
+{
+	return __inject_sigp_emergency(vcpu, dst_vcpu);
+}
+
+static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
+					struct kvm_vcpu *dst_vcpu,
 					u16 asn, u64 *reg)
 {
-	struct kvm_vcpu *dst_vcpu = NULL;
 	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
 	u16 p_asn, s_asn;
 	psw_t *psw;
 	u32 flags;
 
-	if (cpu_addr < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
 	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
 	psw = &dst_vcpu->arch.sie_block->gpsw;
 	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
 	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */
 
-	/* Deliver the emergency signal? */
+	/* Inject the emergency signal? */
 	if (!(flags & CPUSTAT_STOPPED)
 	    || (psw->mask & psw_int_mask) != psw_int_mask
 	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
 	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
-		return __sigp_emergency(vcpu, cpu_addr);
+		return __inject_sigp_emergency(vcpu, dst_vcpu);
 	} else {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
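The hunks above move from struct kvm_s390_interrupt, where a single parm field is overloaded for every interrupt type, to struct kvm_s390_irq, whose union gives each type its own named payload (.u.emerg.code, .u.extcall.code). A minimal standalone C sketch of that pattern follows; the type and field names are illustrative stand-ins, not the kernel's real uapi definitions.

/* Userspace model of a typed-payload interrupt: one injection entry
 * point takes a type tag plus a union, so each interrupt type carries
 * exactly the fields it needs. Names are illustrative only. */
#include <stdio.h>

enum model_irq_type { MODEL_INT_EMERGENCY, MODEL_INT_EXTERNAL_CALL };

struct model_irq {
	enum model_irq_type type;
	union {
		struct { unsigned short code; } emerg;   /* sending CPU address */
		struct { unsigned short code; } extcall; /* sending CPU address */
	} u;
};

static int model_inject(const struct model_irq *irq)
{
	switch (irq->type) {
	case MODEL_INT_EMERGENCY:
		printf("emergency signal from cpu %u\n", irq->u.emerg.code);
		return 0;
	case MODEL_INT_EXTERNAL_CALL:
		printf("external call from cpu %u\n", irq->u.extcall.code);
		return 0;
	}
	return -1;
}

int main(void)
{
	struct model_irq irq = { .type = MODEL_INT_EMERGENCY, .u.emerg.code = 1 };
	return model_inject(&irq);
}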
@@ -104,23 +95,19 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	}
 }
 
-static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __sigp_external_call(struct kvm_vcpu *vcpu,
+				struct kvm_vcpu *dst_vcpu)
 {
-	struct kvm_s390_interrupt s390int = {
+	struct kvm_s390_irq irq = {
 		.type = KVM_S390_INT_EXTERNAL_CALL,
-		.parm = vcpu->vcpu_id,
+		.u.extcall.code = vcpu->vcpu_id,
 	};
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
-	if (cpu_addr < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
 	if (!rc)
-		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
+		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
+			   dst_vcpu->vcpu_id);
 
 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
@@ -128,29 +115,20 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
 {
 	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
-	struct kvm_s390_interrupt_info *inti;
 	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
-	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
-	if (!inti)
-		return -ENOMEM;
-	inti->type = KVM_S390_SIGP_STOP;
-
 	spin_lock(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP) {
 		/* another SIGP STOP is pending */
-		kfree(inti);
 		rc = SIGP_CC_BUSY;
 		goto out;
 	}
 	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
-		kfree(inti);
 		if ((action & ACTION_STORE_ON_STOP) != 0)
 			rc = -ESHUTDOWN;
 		goto out;
 	}
-	list_add_tail(&inti->list, &li->list);
-	atomic_set(&li->active, 1);
+	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
 	li->action_bits |= action;
 	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
 	kvm_s390_vcpu_wakeup(dst_vcpu);
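The hunk above also shows the payoff of the per-vcpu pending_irqs bitmap: making a SIGP STOP pending becomes a set_bit() on li->pending_irqs instead of a GFP_ATOMIC allocation queued on a list, so the -ENOMEM failure path and the kfree() calls on every early exit disappear. A standalone sketch of the idea, with made-up names in place of the kernel's real bit constants and locking:

/* Userspace model: a pending-interrupt bitmap replaces an allocated,
 * list-queued record. Marking an interrupt pending cannot fail. */
#include <stdio.h>

#define MODEL_IRQ_PEND_SIGP_STOP 0 /* stand-in for IRQ_PEND_SIGP_STOP */

struct model_local_int {
	unsigned long pending_irqs;
};

static void model_make_stop_pending(struct model_local_int *li)
{
	li->pending_irqs |= 1UL << MODEL_IRQ_PEND_SIGP_STOP; /* cf. set_bit() */
}

static int model_stop_is_pending(const struct model_local_int *li)
{
	return !!(li->pending_irqs & (1UL << MODEL_IRQ_PEND_SIGP_STOP));
}

int main(void)
{
	struct model_local_int li = { 0 };

	model_make_stop_pending(&li);
	printf("stop pending: %d\n", model_stop_is_pending(&li));
	return 0;
}

In the real code the bit is set under li->lock and the bookkeeping that a stop is pending lives entirely in the bitmap plus action_bits, so no cleanup is needed on the SIGP_CC_BUSY and already-stopped exits.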
@@ -160,23 +138,27 @@ out:
 	return rc;
 }
 
-static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
+static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 {
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
+	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
+	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
 
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
+	return rc;
+}
 
-	rc = __inject_sigp_stop(dst_vcpu, action);
+static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
+					struct kvm_vcpu *dst_vcpu, u64 *reg)
+{
+	int rc;
 
-	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
+					  ACTION_STORE_ON_STOP);
+	VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
+		   dst_vcpu->vcpu_id);
 
-	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
+	if (rc == -ESHUTDOWN) {
 		/* If the CPU has already been stopped, we still have
 		 * to save the status when doing stop-and-store. This
 		 * has to be done after unlocking all spinlocks. */
@@ -212,18 +194,12 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 	return rc;
 }
 
-static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
-			     u64 *reg)
+static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
+			     u32 address, u64 *reg)
 {
 	struct kvm_s390_local_interrupt *li;
-	struct kvm_vcpu *dst_vcpu = NULL;
-	struct kvm_s390_interrupt_info *inti;
 	int rc;
 
-	if (cpu_addr < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
 
 	/*
@@ -238,46 +214,34 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 		return SIGP_CC_STATUS_STORED;
 	}
 
-	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-	if (!inti)
-		return SIGP_CC_BUSY;
-
 	spin_lock(&li->lock);
 	/* cpu must be in stopped state */
 	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
 		rc = SIGP_CC_STATUS_STORED;
-		kfree(inti);
 		goto out_li;
 	}
 
-	inti->type = KVM_S390_SIGP_SET_PREFIX;
-	inti->prefix.address = address;
-
-	list_add_tail(&inti->list, &li->list);
-	atomic_set(&li->active, 1);
+	li->irq.prefix.address = address;
+	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
 	kvm_s390_vcpu_wakeup(dst_vcpu);
 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
-	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
+	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
+		   address);
 out_li:
 	spin_unlock(&li->lock);
 	return rc;
 }
 
-static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
-				       u32 addr, u64 *reg)
+static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
+				       struct kvm_vcpu *dst_vcpu,
+				       u32 addr, u64 *reg)
 {
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int flags;
 	int rc;
 
-	if (cpu_id < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-
 	spin_lock(&dst_vcpu->arch.local_int.lock);
 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
 	spin_unlock(&dst_vcpu->arch.local_int.lock);
@@ -297,19 +261,12 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
 	return rc;
 }
 
-static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
-				u64 *reg)
+static int __sigp_sense_running(struct kvm_vcpu *vcpu,
+				struct kvm_vcpu *dst_vcpu, u64 *reg)
 {
 	struct kvm_s390_local_interrupt *li;
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
 	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
 		/* running */
@@ -321,26 +278,19 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 		rc = SIGP_CC_STATUS_STORED;
 	}
 
-	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
-		   rc);
+	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
+		   dst_vcpu->vcpu_id, rc);
 
 	return rc;
 }
 
-/* Test whether the destination CPU is available and not busy */
-static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
+				   struct kvm_vcpu *dst_vcpu, u8 order_code)
 {
-	struct kvm_s390_local_interrupt *li;
-	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-	struct kvm_vcpu *dst_vcpu = NULL;
-
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
+	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
+	/* handle (RE)START in user space */
+	int rc = -EOPNOTSUPP;
 
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-	li = &dst_vcpu->arch.local_int;
 	spin_lock(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP)
 		rc = SIGP_CC_BUSY;
@@ -349,90 +299,131 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	return rc;
 }
 
-int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
+				    struct kvm_vcpu *dst_vcpu, u8 order_code)
 {
-	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
-	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
-	u32 parameter;
-	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
-	u8 order_code;
-	int rc;
+	/* handle (INITIAL) CPU RESET in user space */
+	return -EOPNOTSUPP;
+}
 
-	/* sigp in userspace can exit */
-	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
+				  struct kvm_vcpu *dst_vcpu)
+{
+	/* handle unknown orders in user space */
+	return -EOPNOTSUPP;
+}
 
-	order_code = kvm_s390_get_base_disp_rs(vcpu);
+static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
+			   u16 cpu_addr, u32 parameter, u64 *status_reg)
+{
+	int rc;
+	struct kvm_vcpu *dst_vcpu;
 
-	if (r1 % 2)
-		parameter = vcpu->run->s.regs.gprs[r1];
-	else
-		parameter = vcpu->run->s.regs.gprs[r1 + 1];
+	if (cpu_addr >= KVM_MAX_VCPUS)
+		return SIGP_CC_NOT_OPERATIONAL;
+
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
 
-	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
 	switch (order_code) {
 	case SIGP_SENSE:
 		vcpu->stat.instruction_sigp_sense++;
-		rc = __sigp_sense(vcpu, cpu_addr,
-				  &vcpu->run->s.regs.gprs[r1]);
+		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
 		break;
 	case SIGP_EXTERNAL_CALL:
 		vcpu->stat.instruction_sigp_external_call++;
-		rc = __sigp_external_call(vcpu, cpu_addr);
+		rc = __sigp_external_call(vcpu, dst_vcpu);
 		break;
 	case SIGP_EMERGENCY_SIGNAL:
 		vcpu->stat.instruction_sigp_emergency++;
-		rc = __sigp_emergency(vcpu, cpu_addr);
+		rc = __sigp_emergency(vcpu, dst_vcpu);
 		break;
 	case SIGP_STOP:
 		vcpu->stat.instruction_sigp_stop++;
-		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
+		rc = __sigp_stop(vcpu, dst_vcpu);
 		break;
 	case SIGP_STOP_AND_STORE_STATUS:
-		vcpu->stat.instruction_sigp_stop++;
-		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
-						 ACTION_STOP_ON_STOP);
+		vcpu->stat.instruction_sigp_stop_store_status++;
+		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
 		break;
 	case SIGP_STORE_STATUS_AT_ADDRESS:
-		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
-						 &vcpu->run->s.regs.gprs[r1]);
-		break;
-	case SIGP_SET_ARCHITECTURE:
-		vcpu->stat.instruction_sigp_arch++;
-		rc = __sigp_set_arch(vcpu, parameter);
+		vcpu->stat.instruction_sigp_store_status++;
+		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
+						 status_reg);
 		break;
 	case SIGP_SET_PREFIX:
 		vcpu->stat.instruction_sigp_prefix++;
-		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
-				       &vcpu->run->s.regs.gprs[r1]);
+		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
 		break;
 	case SIGP_COND_EMERGENCY_SIGNAL:
-		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
-						  &vcpu->run->s.regs.gprs[r1]);
+		vcpu->stat.instruction_sigp_cond_emergency++;
+		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
+						  status_reg);
 		break;
 	case SIGP_SENSE_RUNNING:
 		vcpu->stat.instruction_sigp_sense_running++;
-		rc = __sigp_sense_running(vcpu, cpu_addr,
-					  &vcpu->run->s.regs.gprs[r1]);
+		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
 		break;
 	case SIGP_START:
-		rc = sigp_check_callable(vcpu, cpu_addr);
-		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
-			rc = -EOPNOTSUPP; /* Handle START in user space */
+		vcpu->stat.instruction_sigp_start++;
+		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
 		break;
 	case SIGP_RESTART:
 		vcpu->stat.instruction_sigp_restart++;
-		rc = sigp_check_callable(vcpu, cpu_addr);
-		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
-			VCPU_EVENT(vcpu, 4,
-				   "sigp restart %x to handle userspace",
-				   cpu_addr);
-			/* user space must know about restart */
-			rc = -EOPNOTSUPP;
-		}
+		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
+		break;
+	case SIGP_INITIAL_CPU_RESET:
+		vcpu->stat.instruction_sigp_init_cpu_reset++;
+		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
+		break;
+	case SIGP_CPU_RESET:
+		vcpu->stat.instruction_sigp_cpu_reset++;
+		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
+		break;
+	default:
+		vcpu->stat.instruction_sigp_unknown++;
+		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
+	}
+
+	if (rc == -EOPNOTSUPP)
+		VCPU_EVENT(vcpu, 4,
+			   "sigp order %u -> cpu %x: handled in user space",
+			   order_code, dst_vcpu->vcpu_id);
+
+	return rc;
+}
+
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+{
+	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+	u32 parameter;
+	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
+	u8 order_code;
+	int rc;
+
+	/* sigp in userspace can exit */
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	order_code = kvm_s390_get_base_disp_rs(vcpu);
+
+	if (r1 % 2)
+		parameter = vcpu->run->s.regs.gprs[r1];
+	else
+		parameter = vcpu->run->s.regs.gprs[r1 + 1];
+
+	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
+	switch (order_code) {
+	case SIGP_SET_ARCHITECTURE:
+		vcpu->stat.instruction_sigp_arch++;
+		rc = __sigp_set_arch(vcpu, parameter);
 		break;
 	default:
-		return -EOPNOTSUPP;
+		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
+				     parameter,
+				     &vcpu->run->s.regs.gprs[r1]);
 	}
 
 	if (rc < 0)
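After this refactoring, kvm_s390_handle_sigp() keeps only SIGP SET ARCHITECTURE, the one order without a destination CPU, and funnels every other order through handle_sigp_dst(), which validates cpu_addr against KVM_MAX_VCPUS and resolves the destination VCPU exactly once instead of repeating that lookup in every handler. A standalone C sketch of that control flow, with stand-in constants and a toy table in place of kvm_get_vcpu():

/* Userspace model of the centralized destination check: one helper
 * performs the address validation and lookup for every targeted order. */
#include <stdio.h>

#define MODEL_MAX_VCPUS    4 /* stand-in for KVM_MAX_VCPUS */
#define CC_ACCEPTED        0 /* stand-in for SIGP_CC_ORDER_CODE_ACCEPTED */
#define CC_NOT_OPERATIONAL 3 /* stand-in for SIGP_CC_NOT_OPERATIONAL */

/* Toy VCPU table standing in for kvm_get_vcpu(). */
static const int vcpu_online[MODEL_MAX_VCPUS] = { 1, 1, 0, 0 };

static int handle_dst_order(unsigned short cpu_addr, unsigned char order)
{
	/* Checked once here, for all destination-CPU orders. */
	if (cpu_addr >= MODEL_MAX_VCPUS || !vcpu_online[cpu_addr])
		return CC_NOT_OPERATIONAL;
	printf("order 0x%02x accepted for cpu %u\n", order, cpu_addr);
	return CC_ACCEPTED;
}

int main(void)
{
	printf("cc=%d\n", handle_dst_order(1, 0x01)); /* e.g. SENSE */
	printf("cc=%d\n", handle_dst_order(3, 0x01)); /* offline: CC 3 */
	return 0;
}

The status_reg pointer passed to handle_sigp_dst() in the patch plays the role of the old per-case &vcpu->run->s.regs.gprs[r1] argument: handlers that store a status write it through one shared parameter rather than each reaching into the register file.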