author	Heiko Carstens <heiko.carstens@de.ibm.com>	2013-03-05 07:14:43 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2013-03-07 14:21:21 -0500
commit	dc5008b9bf6adb0c0a5afba6fb376a85451b2697 (patch)
tree	b52050bd00f8fc5e8897f10497a3bbf051cb0155
parent	59a1fa2d80c0d351755cb29273b2b256dc4b3a11 (diff)
s390/kvm: remove explicit -EFAULT return code checking on guest access
Let's change to the paradigm that every return code from guest memory access
functions that is not zero translates to -EFAULT and do not explicitly compare.
Explicitly comparing the return value with -EFAULT has already shown itself to
be a bit fragile. In addition, this is closer to the handling of the
copy_to/from_user functions, which imho is in general a good idea.
Also shorten the return code handling in interrupt.c a bit.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
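[Editor's note] To make the paradigm above concrete, here is a minimal, self-contained
sketch that is not part of the commit: put_guest_word() and copy_psw_to_guest() are
hypothetical stand-ins for the real guest access functions, and the addresses are
placeholders. The only point is the error-handling shape: the return codes are OR-ed
into a single rc and any non-zero result is collapsed to -EFAULT once, at the end.

#include <errno.h>
#include <stdio.h>

/* Hypothetical guest accessors: return 0 on success, -EFAULT on failure. */
static int put_guest_word(unsigned long gaddr, unsigned short val)
{
	(void)gaddr; (void)val;
	return 0;		/* pretend this store succeeded */
}

static int copy_psw_to_guest(unsigned long gaddr)
{
	(void)gaddr;
	return -EFAULT;		/* pretend this access faulted */
}

static int deliver_example_interrupt(void)
{
	int rc;

	/* OR the return codes together instead of checking each one against -EFAULT. */
	rc  = put_guest_word(0x100, 0x1201);	/* interrupt code (placeholder address) */
	rc |= put_guest_word(0x102, 0x0001);	/* cpu address (placeholder address) */
	rc |= copy_psw_to_guest(0x110);		/* old PSW (placeholder address) */

	/* Only the zero/non-zero distinction matters; report any failure as -EFAULT. */
	return rc ? -EFAULT : 0;
}

int main(void)
{
	printf("rc = %d\n", deliver_example_interrupt());	/* non-zero: one access "faulted" */
	return 0;
}

OR-ing discards the individual error values, which is acceptable in this style because
the caller only acts on whether the combined rc is zero.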
-rw-r--r--	arch/s390/kvm/intercept.c	|   4
-rw-r--r--	arch/s390/kvm/interrupt.c	| 241
-rw-r--r--	arch/s390/kvm/priv.c		|   6
3 files changed, 74 insertions, 177 deletions
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index f26ff1e31bdb..9b2204759445 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -45,7 +45,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	do {
 		rc = get_guest_u64(vcpu, useraddr,
 				   &vcpu->arch.sie_block->gcr[reg]);
-		if (rc == -EFAULT) {
+		if (rc) {
 			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 			break;
 		}
@@ -79,7 +79,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 	reg = reg1;
 	do {
 		rc = get_guest_u32(vcpu, useraddr, &val);
-		if (rc == -EFAULT) {
+		if (rc) {
 			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 			break;
 		}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 37116a77cb4b..5afa931aed11 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -180,7 +180,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 				   struct kvm_s390_interrupt_info *inti)
 {
 	const unsigned short table[] = { 2, 4, 4, 6 };
-	int rc, exception = 0;
+	int rc = 0;
 
 	switch (inti->type) {
 	case KVM_S390_INT_EMERGENCY:
@@ -188,74 +188,38 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		vcpu->stat.deliver_emergency_signal++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->emerg.code, 0);
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			__LC_EXT_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
 		break;
-
 	case KVM_S390_INT_EXTERNAL_CALL:
 		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
 		vcpu->stat.deliver_external_call++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->extcall.code, 0);
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			__LC_EXT_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
 		break;
-
 	case KVM_S390_INT_SERVICE:
 		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
 			   inti->ext.ext_params);
 		vcpu->stat.deliver_service_signal++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->ext.ext_params, 0);
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			__LC_EXT_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
-		if (rc == -EFAULT)
-			exception = 1;
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
 		break;
-
 	case KVM_S390_INT_VIRTIO:
 		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
@@ -263,34 +227,16 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->ext.ext_params,
 						 inti->ext.ext_params2);
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			__LC_EXT_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
-				inti->ext.ext_params2);
-		if (rc == -EFAULT)
-			exception = 1;
+		rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
+		rc |= put_guest_u64(vcpu, __LC_EXT_PARAMS2,
+				    inti->ext.ext_params2);
 		break;
-
 	case KVM_S390_SIGP_STOP:
 		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
 		vcpu->stat.deliver_stop_signal++;
@@ -313,18 +259,14 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		vcpu->stat.deliver_restart_signal++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 0, 0);
-		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
-			restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc = copy_to_guest(vcpu,
+				   offsetof(struct _lowcore, restart_old_psw),
+				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      offsetof(struct _lowcore, restart_psw),
+				      sizeof(psw_t));
 		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 		break;
-
 	case KVM_S390_PROGRAM_INT:
 		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
 			   inti->pgm.code,
@@ -332,24 +274,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		vcpu->stat.deliver_program_int++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->pgm.code, 0);
 		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
-			table[vcpu->arch.sie_block->ipa >> 14]);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
-			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			__LC_PGM_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc |= put_guest_u16(vcpu, __LC_PGM_ILC,
+				    table[vcpu->arch.sie_block->ipa >> 14]);
+		rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_PGM_NEW_PSW, sizeof(psw_t));
 		break;
 
 	case KVM_S390_MCHK:
@@ -358,24 +289,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->mchk.cr14,
 						 inti->mchk.mcic);
 		rc = kvm_s390_vcpu_store_status(vcpu,
 						KVM_S390_STORE_STATUS_PREFIXED);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
-			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			__LC_MCK_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc |= put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
+		rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_MCK_NEW_PSW, sizeof(psw_t));
 		break;
 
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
@@ -388,67 +308,44 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		vcpu->stat.deliver_io_int++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 param0, param1);
 		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
 				   inti->io.subchannel_id);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
-			inti->io.subchannel_nr);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u32(vcpu, __LC_IO_INT_PARM,
-			inti->io.io_int_parm);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
-			inti->io.io_int_word);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
-			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			__LC_IO_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc |= put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
+				    inti->io.subchannel_nr);
+		rc |= put_guest_u32(vcpu, __LC_IO_INT_PARM,
+				    inti->io.io_int_parm);
+		rc |= put_guest_u32(vcpu, __LC_IO_INT_WORD,
+				    inti->io.io_int_word);
+		rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_IO_NEW_PSW, sizeof(psw_t));
 		break;
 	}
 	default:
 		BUG();
 	}
-	if (exception) {
+	if (rc) {
 		printk("kvm: The guest lowcore is not mapped during interrupt "
 		       "delivery, killing userspace\n");
 		do_exit(SIGKILL);
 	}
 }
 
 static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 {
-	int rc, exception = 0;
+	int rc;
 
 	if (psw_extint_disabled(vcpu))
 		return 0;
 	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
 		return 0;
 	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
-	if (rc == -EFAULT)
-		exception = 1;
-	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-		&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	if (rc == -EFAULT)
-		exception = 1;
-	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-		__LC_EXT_NEW_PSW, sizeof(psw_t));
-	if (rc == -EFAULT)
-		exception = 1;
-	if (exception) {
+	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+			      __LC_EXT_NEW_PSW, sizeof(psw_t));
+	if (rc) {
 		printk("kvm: The guest lowcore is not mapped during interrupt "
 		       "delivery, killing userspace\n");
 		do_exit(SIGKILL);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 75ad91e38e8a..34b42dc285ee 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -108,7 +108,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	}
 
 	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
-	if (rc == -EFAULT) {
+	if (rc) {
 		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		goto out;
 	}
@@ -230,7 +230,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 
 	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
 			   &facility_list, sizeof(facility_list));
-	if (rc == -EFAULT)
+	if (rc)
 		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	else {
 		VCPU_EVENT(vcpu, 5, "store facility list value %x",
@@ -348,7 +348,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	}
 
 	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
-	if (rc == -EFAULT) {
+	if (rc) {
 		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		goto out;
 	}