about summary refs log tree commit diff stats
path: root/arch/s390
diff options
context:
space:
mode:
author: Thomas Huth <thuth@linux.vnet.ibm.com> 2013-06-20 11:22:01 -0400
committer: Paolo Bonzini <pbonzini@redhat.com> 2013-06-20 17:31:43 -0400
commit: 5087dfa6c8b9f7893819f315eb24201ff5c07142 (patch)
tree: 22a3f735bc6f6cc3398b4ca53947438494e4ae43 /arch/s390
parent: f9f6bbc6991f2ba21bfaff90f4060f2df766ca20 (diff)
KVM: s390: Privileged operation checks moved to instruction handlers
We need more fine-grained control about the point in time when we check for privileged instructions, since the exceptions that can happen during an instruction have a well-defined priority. For example, for the PFMF instruction, the check for PGM_PRIVILEGED_OP must happen after the check for PGM_OPERATION since the latter has a higher precedence - thus the check for privileged operation must not be done in kvm_s390_handle_b9() already. Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com> Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com> Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/kvm/priv.c  63
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 04dc4a143964..0b19e2226955 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -37,6 +37,9 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
37 37
38 vcpu->stat.instruction_spx++; 38 vcpu->stat.instruction_spx++;
39 39
40 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
41 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
42
40 operand2 = kvm_s390_get_base_disp_s(vcpu); 43 operand2 = kvm_s390_get_base_disp_s(vcpu);
41 44
42 /* must be word boundary */ 45 /* must be word boundary */
@@ -68,6 +71,9 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
68 71
69 vcpu->stat.instruction_stpx++; 72 vcpu->stat.instruction_stpx++;
70 73
74 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
75 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
76
71 operand2 = kvm_s390_get_base_disp_s(vcpu); 77 operand2 = kvm_s390_get_base_disp_s(vcpu);
72 78
73 /* must be word boundary */ 79 /* must be word boundary */
@@ -92,6 +98,9 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
92 98
93 vcpu->stat.instruction_stap++; 99 vcpu->stat.instruction_stap++;
94 100
101 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
102 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
103
95 useraddr = kvm_s390_get_base_disp_s(vcpu); 104 useraddr = kvm_s390_get_base_disp_s(vcpu);
96 105
97 if (useraddr & 1) 106 if (useraddr & 1)
@@ -108,6 +117,10 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
108static int handle_skey(struct kvm_vcpu *vcpu) 117static int handle_skey(struct kvm_vcpu *vcpu)
109{ 118{
110 vcpu->stat.instruction_storage_key++; 119 vcpu->stat.instruction_storage_key++;
120
121 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
122 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
123
111 vcpu->arch.sie_block->gpsw.addr = 124 vcpu->arch.sie_block->gpsw.addr =
112 __rewind_psw(vcpu->arch.sie_block->gpsw, 4); 125 __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
113 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); 126 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
@@ -186,6 +199,9 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
186{ 199{
187 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); 200 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
188 201
202 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
203 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
204
189 if (vcpu->kvm->arch.css_support) { 205 if (vcpu->kvm->arch.css_support) {
190 /* 206 /*
191 * Most I/O instructions will be handled by userspace. 207 * Most I/O instructions will be handled by userspace.
@@ -214,6 +230,10 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
214 int rc; 230 int rc;
215 231
216 vcpu->stat.instruction_stfl++; 232 vcpu->stat.instruction_stfl++;
233
234 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
235 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
236
217 /* only pass the facility bits, which we can handle */ 237 /* only pass the facility bits, which we can handle */
218 facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3; 238 facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;
219 239
@@ -282,6 +302,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
282 psw_t new_psw; 302 psw_t new_psw;
283 u64 addr; 303 u64 addr;
284 304
305 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
306 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
307
285 addr = kvm_s390_get_base_disp_s(vcpu); 308 addr = kvm_s390_get_base_disp_s(vcpu);
286 if (addr & 7) 309 if (addr & 7)
287 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 310 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -300,6 +323,9 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
300 323
301 vcpu->stat.instruction_stidp++; 324 vcpu->stat.instruction_stidp++;
302 325
326 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
327 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
328
303 operand2 = kvm_s390_get_base_disp_s(vcpu); 329 operand2 = kvm_s390_get_base_disp_s(vcpu);
304 330
305 if (operand2 & 7) 331 if (operand2 & 7)
@@ -355,6 +381,9 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
355 vcpu->stat.instruction_stsi++; 381 vcpu->stat.instruction_stsi++;
356 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); 382 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
357 383
384 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
385 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
386
358 operand2 = kvm_s390_get_base_disp_s(vcpu); 387 operand2 = kvm_s390_get_base_disp_s(vcpu);
359 388
360 if (operand2 & 0xfff && fc > 0) 389 if (operand2 & 0xfff && fc > 0)
@@ -436,20 +465,14 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
436 intercept_handler_t handler; 465 intercept_handler_t handler;
437 466
438 /* 467 /*
439 * a lot of B2 instructions are priviledged. We first check for 468 * A lot of B2 instructions are priviledged. Here we check for
440 * the privileged ones, that we can handle in the kernel. If the 469 * the privileged ones, that we can handle in the kernel.
441 * kernel can handle this instruction, we check for the problem 470 * Anything else goes to userspace.
442 * state bit and (a) handle the instruction or (b) send a code 2 471 */
443 * program check.
444 * Anything else goes to userspace.*/
445 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 472 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
446 if (handler) { 473 if (handler)
447 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 474 return handler(vcpu);
448 return kvm_s390_inject_program_int(vcpu, 475
449 PGM_PRIVILEGED_OP);
450 else
451 return handler(vcpu);
452 }
453 return -EOPNOTSUPP; 476 return -EOPNOTSUPP;
454} 477}
455 478
@@ -560,14 +583,9 @@ int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
560 583
561 /* This is handled just as for the B2 instructions. */ 584 /* This is handled just as for the B2 instructions. */
562 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 585 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
563 if (handler) { 586 if (handler)
564 if ((handler != handle_epsw) && 587 return handler(vcpu);
565 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)) 588
566 return kvm_s390_inject_program_int(vcpu,
567 PGM_PRIVILEGED_OP);
568 else
569 return handler(vcpu);
570 }
571 return -EOPNOTSUPP; 589 return -EOPNOTSUPP;
572} 590}
573 591
@@ -579,9 +597,6 @@ int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
579{ 597{
580 intercept_handler_t handler; 598 intercept_handler_t handler;
581 599
582 /* All eb instructions that end up here are privileged. */
583 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
584 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
585 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; 600 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
586 if (handler) 601 if (handler)
587 return handler(vcpu); 602 return handler(vcpu);