Diffstat (limited to 'arch/s390/kvm/priv.c')
-rw-r--r--  arch/s390/kvm/priv.c  272
1 file changed, 203 insertions, 69 deletions
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 476e9e218f43..27f9051a78f8 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -35,8 +35,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
         struct kvm_vcpu *cpup;
         s64 hostclk, val;
+        int i, rc;
         u64 op2;
-        int i;
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -44,8 +44,9 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
         op2 = kvm_s390_get_base_disp_s(vcpu);
         if (op2 & 7)    /* Operand must be on a doubleword boundary */
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-        if (get_guest(vcpu, val, (u64 __user *) op2))
-                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        rc = read_guest(vcpu, op2, &val, sizeof(val));
+        if (rc)
+                return kvm_s390_inject_prog_cond(vcpu, rc);
 
         if (store_tod_clock(&hostclk)) {
                 kvm_s390_set_psw_cc(vcpu, 3);
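
This hunk shows the conversion pattern that recurs through the whole diff: get_guest()/put_guest() calls that could only report a fault are replaced by read_guest()/write_guest(), whose return code kvm_s390_inject_prog_cond() splits into host-side errors and guest program interruptions. Below is a small userspace model of that split; the function name and return convention are our reading of kvm_s390_inject_prog_cond(), not code from the patch:

    #include <assert.h>

    /* Model of the assumed contract: rc == 0 or rc < 0 (host errno) pass
     * through untouched; rc > 0 is a program interruption code that gets
     * injected into the guest instead of failing the host call. */
    static int inject_prog_cond(int rc, int *injected_pgm)
    {
            if (rc <= 0)
                    return rc;
            *injected_pgm = rc;     /* e.g. PGM_ADDRESSING == 0x05 */
            return 0;               /* handled; the intercept returns 0 */
    }

    int main(void)
    {
            int pgm = 0;

            assert(inject_prog_cond(-14, &pgm) == -14 && pgm == 0); /* -EFAULT */
            assert(inject_prog_cond(0x05, &pgm) == 0 && pgm == 0x05);
            return 0;
    }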
@@ -65,8 +66,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 static int handle_set_prefix(struct kvm_vcpu *vcpu)
 {
         u64 operand2;
-        u32 address = 0;
-        u8 tmp;
+        u32 address;
+        int rc;
 
         vcpu->stat.instruction_spx++;
 
@@ -80,14 +81,18 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
         /* get the value */
-        if (get_guest(vcpu, address, (u32 __user *) operand2))
-                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        rc = read_guest(vcpu, operand2, &address, sizeof(address));
+        if (rc)
+                return kvm_s390_inject_prog_cond(vcpu, rc);
 
-        address = address & 0x7fffe000u;
+        address &= 0x7fffe000u;
 
-        /* make sure that the new value is valid memory */
-        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
-           (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
+        /*
+         * Make sure the new value is valid memory. We only need to check the
+         * first page, since address is 8k aligned and memory pieces are always
+         * at least 1MB aligned and have at least a size of 1MB.
+         */
+        if (kvm_is_error_gpa(vcpu->kvm, address))
                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
         kvm_s390_set_prefix(vcpu, address);
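
The new comment's alignment argument can be checked with plain arithmetic: masking with 0x7fffe000 yields an 8k-aligned 31-bit address, and an 8k-aligned address can never have its two 4k pages straddle a 1MB boundary, so validating the first page validates both. A self-contained check (the sample guest value is arbitrary):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical guest-supplied value for the new prefix */
            uint32_t address = 0x12345678u & 0x7fffe000u;

            assert(address == 0x12344000u);
            /* 8k aligned: both pages of the prefix area start here */
            assert((address & 0x1fffu) == 0);
            /* both 4k pages fall into the same 1MB-aligned memory piece */
            assert((address & ~0xfffffu) == ((address + 0x1000u) & ~0xfffffu));
            return 0;
    }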
@@ -101,6 +106,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 {
         u64 operand2;
         u32 address;
+        int rc;
 
         vcpu->stat.instruction_stpx++;
 
@@ -117,8 +123,9 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
         address = address & 0x7fffe000u;
 
         /* get the value */
-        if (put_guest(vcpu, address, (u32 __user *)operand2))
-                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        rc = write_guest(vcpu, operand2, &address, sizeof(address));
+        if (rc)
+                return kvm_s390_inject_prog_cond(vcpu, rc);
 
         VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
         trace_kvm_s390_handle_prefix(vcpu, 0, address);
@@ -127,28 +134,44 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 
 static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 {
-        u64 useraddr;
+        u16 vcpu_id = vcpu->vcpu_id;
+        u64 ga;
+        int rc;
 
         vcpu->stat.instruction_stap++;
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-        useraddr = kvm_s390_get_base_disp_s(vcpu);
+        ga = kvm_s390_get_base_disp_s(vcpu);
 
-        if (useraddr & 1)
+        if (ga & 1)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-        if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
-                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
+        if (rc)
+                return kvm_s390_inject_prog_cond(vcpu, rc);
 
-        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
-        trace_kvm_s390_handle_stap(vcpu, useraddr);
+        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
+        trace_kvm_s390_handle_stap(vcpu, ga);
         return 0;
 }
 
+static void __skey_check_enable(struct kvm_vcpu *vcpu)
+{
+        if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
+                return;
+
+        s390_enable_skey();
+        trace_kvm_s390_skey_related_inst(vcpu);
+        vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+}
+
+
 static int handle_skey(struct kvm_vcpu *vcpu)
 {
+        __skey_check_enable(vcpu);
+
         vcpu->stat.instruction_storage_key++;
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
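
__skey_check_enable() implements lazy storage-key enablement: while the ICTL intercept bits are set, the key instructions ISKE/SSKE/RRBE exit to the host; the first time the guest uses one, real storage keys are switched on and all three intercepts are dropped, so later key instructions run in SIE without exiting. A toy model of the bit handling; the ICTL_* values below are placeholders for illustration, not the real control-block bits:

    #include <assert.h>
    #include <stdint.h>

    #define ICTL_ISKE 0x4000u       /* placeholder values, not from the patch */
    #define ICTL_SSKE 0x2000u
    #define ICTL_RRBE 0x1000u

    int main(void)
    {
            uint32_t ictl = ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

            /* first key instruction: enable skeys, drop all three intercepts */
            if (ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))
                    ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
            assert(!(ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)));
            return 0;
    }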
@@ -160,9 +183,21 @@ static int handle_skey(struct kvm_vcpu *vcpu)
         return 0;
 }
 
+static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
+{
+        psw_t *psw = &vcpu->arch.sie_block->gpsw;
+
+        vcpu->stat.instruction_ipte_interlock++;
+        if (psw_bits(*psw).p)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
+        psw->addr = __rewind_psw(*psw, 4);
+        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
+        return 0;
+}
+
 static int handle_test_block(struct kvm_vcpu *vcpu)
 {
-        unsigned long hva;
         gpa_t addr;
         int reg2;
 
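
handle_ipte_interlock() resolves the interlock by sleeping until the IPTE lock is free and then rewinding the PSW by the 4-byte instruction length, so the guest transparently re-executes the intercepted instruction. A userspace model of the rewind; the addressing-mode wraparound semantics are our assumption about the kernel's __rewind_psw() helper:

    #include <assert.h>
    #include <stdint.h>

    /* Step the PSW address back by the instruction length, wrapping
     * within the current addressing mode (24/31/64 bit). */
    static uint64_t rewind_addr(uint64_t addr, unsigned int ilen, int am_bits)
    {
            uint64_t mask = (am_bits == 64) ? ~0ULL : (1ULL << am_bits) - 1;

            return (addr - ilen) & mask;
    }

    int main(void)
    {
            assert(rewind_addr(0x20004, 4, 64) == 0x20000);
            /* wraparound at the 31-bit boundary */
            assert(rewind_addr(0x2, 4, 31) == 0x7ffffffe);
            return 0;
    }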
@@ -173,14 +208,13 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
         addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
         addr = kvm_s390_real_to_abs(vcpu, addr);
 
-        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
-        if (kvm_is_error_hva(hva))
+        if (kvm_is_error_gpa(vcpu->kvm, addr))
                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
         /*
          * We don't expect errors on modern systems, and do not care
          * about storage keys (yet), so let's just clear the page.
          */
-        if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
+        if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
                 return -EFAULT;
         kvm_s390_set_psw_cc(vcpu, 0);
         vcpu->run->s.regs.gprs[0] = 0;
@@ -190,9 +224,12 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
 static int handle_tpi(struct kvm_vcpu *vcpu)
 {
         struct kvm_s390_interrupt_info *inti;
+        unsigned long len;
+        u32 tpi_data[3];
+        int cc, rc;
         u64 addr;
-        int cc;
 
+        rc = 0;
         addr = kvm_s390_get_base_disp_s(vcpu);
         if (addr & 3)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -201,30 +238,41 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
         if (!inti)
                 goto no_interrupt;
         cc = 1;
+        tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
+        tpi_data[1] = inti->io.io_int_parm;
+        tpi_data[2] = inti->io.io_int_word;
         if (addr) {
                 /*
                  * Store the two-word I/O interruption code into the
                  * provided area.
                  */
-                if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
-                    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
-                    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
-                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                len = sizeof(tpi_data) - 4;
+                rc = write_guest(vcpu, addr, &tpi_data, len);
+                if (rc)
+                        return kvm_s390_inject_prog_cond(vcpu, rc);
         } else {
                 /*
                  * Store the three-word I/O interruption code into
                  * the appropriate lowcore area.
                  */
-                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
-                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
-                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
-                put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
+                len = sizeof(tpi_data);
+                if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
+                        rc = -EFAULT;
         }
-        kfree(inti);
+        /*
+         * If we encounter a problem storing the interruption code, the
+         * instruction is suppressed from the guest's view: reinject the
+         * interrupt.
+         */
+        if (!rc)
+                kfree(inti);
+        else
+                kvm_s390_reinject_io_int(vcpu->kvm, inti);
 no_interrupt:
         /* Set condition code and we're done. */
-        kvm_s390_set_psw_cc(vcpu, cc);
-        return 0;
+        if (!rc)
+                kvm_s390_set_psw_cc(vcpu, cc);
+        return rc ? -EFAULT : 0;
 }
 
 static int handle_tsch(struct kvm_vcpu *vcpu)
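
The three words assembled into tpi_data[] mirror the I/O interruption code: the subchannel id and number are packed into the first word, followed by the interruption parameter and the interruption word. TPI with a nonzero operand stores only the two-word form, hence sizeof(tpi_data) - 4. A self-contained check of the packing (the field values are made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical interrupt: subchannel id 0x0001, number 0x2345 */
            uint16_t subchannel_id = 0x0001, subchannel_nr = 0x2345;
            uint32_t tpi_data[3];

            tpi_data[0] = (uint32_t)subchannel_id << 16 | subchannel_nr;
            tpi_data[1] = 0;        /* io_int_parm would go here */
            tpi_data[2] = 0;        /* io_int_word would go here */

            assert(tpi_data[0] == 0x00012345u);
            /* TPI with an operand address stores the two-word code only */
            assert(sizeof(tpi_data) - 4 == 8);
            return 0;
    }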
@@ -292,10 +340,10 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
-                           vfacilities, 4);
+        rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
+                            vfacilities, 4);
         if (rc)
-                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                return rc;
         VCPU_EVENT(vcpu, 5, "store facility list value %x",
                    *(unsigned int *) vfacilities);
         trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
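
write_guest_lc() is the lowcore-specific accessor this series introduces: its offset names a location in the VCPU's lowcore, which each CPU relocates via its prefix register. A conceptual sketch of the address computation under our assumptions, not the in-tree implementation; the 0xc8 offset used in the example matches the architected STFL location 200, but treat it as illustrative:

    #include <assert.h>

    /* Lowcore offsets are below 8k; the per-VCPU prefix relocates the
     * whole 8k lowcore, so adding the prefix yields the absolute address. */
    static unsigned long lowcore_to_abs(unsigned long lc_offset,
                                        unsigned long prefix)
    {
            assert(lc_offset < 0x2000);
            return prefix + lc_offset;
    }

    int main(void)
    {
            /* a VCPU with prefix 0x44000: offset 0xc8 lands at 0x440c8 */
            assert(lowcore_to_abs(0xc8, 0x44000) == 0x440c8);
            return 0;
    }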
@@ -333,6 +381,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
         psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
         psw_compat_t new_psw;
         u64 addr;
+        int rc;
 
         if (gpsw->mask & PSW_MASK_PSTATE)
                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -340,8 +389,10 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
         addr = kvm_s390_get_base_disp_s(vcpu);
         if (addr & 7)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
-                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+        rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+        if (rc)
+                return kvm_s390_inject_prog_cond(vcpu, rc);
         if (!(new_psw.mask & PSW32_MASK_BASE))
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
         gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
@@ -357,6 +408,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 {
         psw_t new_psw;
         u64 addr;
+        int rc;
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -364,8 +416,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
         addr = kvm_s390_get_base_disp_s(vcpu);
         if (addr & 7)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
-                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+        if (rc)
+                return kvm_s390_inject_prog_cond(vcpu, rc);
         vcpu->arch.sie_block->gpsw = new_psw;
         if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -375,7 +428,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
+        u64 stidp_data = vcpu->arch.stidp_data;
         u64 operand2;
+        int rc;
 
         vcpu->stat.instruction_stidp++;
 
@@ -387,8 +442,9 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
         if (operand2 & 7)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-        if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
-                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
+        if (rc)
+                return kvm_s390_inject_prog_cond(vcpu, rc);
 
         VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
         return 0;
@@ -474,9 +530,10 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
                 break;
         }
 
-        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
-                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-                goto out_exception;
+        rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
+        if (rc) {
+                rc = kvm_s390_inject_prog_cond(vcpu, rc);
+                goto out;
         }
         trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
         free_page(mem);
@@ -485,7 +542,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
         return 0;
 out_no_data:
         kvm_s390_set_psw_cc(vcpu, 3);
-out_exception:
+out:
         free_page(mem);
         return rc;
 }
@@ -496,6 +553,7 @@ static const intercept_handler_t b2_handlers[256] = {
         [0x10] = handle_set_prefix,
         [0x11] = handle_store_prefix,
         [0x12] = handle_store_cpu_address,
+        [0x21] = handle_ipte_interlock,
         [0x29] = handle_skey,
         [0x2a] = handle_skey,
         [0x2b] = handle_skey,
@@ -513,6 +571,7 @@ static const intercept_handler_t b2_handlers[256] = {
         [0x3a] = handle_io_inst,
         [0x3b] = handle_io_inst,
         [0x3c] = handle_io_inst,
+        [0x50] = handle_ipte_interlock,
         [0x5f] = handle_io_inst,
         [0x74] = handle_io_inst,
         [0x76] = handle_io_inst,
@@ -618,6 +677,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
         }
 
         if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
+                __skey_check_enable(vcpu);
                 if (set_guest_storage_key(current->mm, useraddr,
                         vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
                         vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
@@ -642,7 +702,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
         VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
         gmap = vcpu->arch.gmap;
         vcpu->stat.instruction_essa++;
-        if (!kvm_enabled_cmma() || !vcpu->arch.sie_block->cbrlo)
+        if (!kvm_s390_cmma_enabled(vcpu->kvm))
                 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -672,7 +732,10 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 }
 
 static const intercept_handler_t b9_handlers[256] = {
+        [0x8a] = handle_ipte_interlock,
         [0x8d] = handle_epsw,
+        [0x8e] = handle_ipte_interlock,
+        [0x8f] = handle_ipte_interlock,
         [0xab] = handle_essa,
         [0xaf] = handle_pfmf,
 };
@@ -693,32 +756,67 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 {
         int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
         int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-        u64 useraddr;
         u32 val = 0;
         int reg, rc;
+        u64 ga;
 
         vcpu->stat.instruction_lctl++;
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-        useraddr = kvm_s390_get_base_disp_rs(vcpu);
+        ga = kvm_s390_get_base_disp_rs(vcpu);
 
-        if (useraddr & 3)
+        if (ga & 3)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
-                   useraddr);
-        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
+        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
 
         reg = reg1;
         do {
-                rc = get_guest(vcpu, val, (u32 __user *) useraddr);
+                rc = read_guest(vcpu, ga, &val, sizeof(val));
                 if (rc)
-                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                        return kvm_s390_inject_prog_cond(vcpu, rc);
                 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                 vcpu->arch.sie_block->gcr[reg] |= val;
-                useraddr += 4;
+                ga += 4;
+                if (reg == reg3)
+                        break;
+                reg = (reg + 1) % 16;
+        } while (1);
+
+        return 0;
+}
+
+int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
+{
+        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+        u64 ga;
+        u32 val;
+        int reg, rc;
+
+        vcpu->stat.instruction_stctl++;
+
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+        ga = kvm_s390_get_base_disp_rs(vcpu);
+
+        if (ga & 3)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+        VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+        trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
+
+        reg = reg1;
+        do {
+                val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful;
+                rc = write_guest(vcpu, ga, &val, sizeof(val));
+                if (rc)
+                        return kvm_s390_inject_prog_cond(vcpu, rc);
+                ga += 4;
                 if (reg == reg3)
                         break;
                 reg = (reg + 1) % 16;
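
Both the existing lctl/lctlg loops and the new stctl/stctg handlers walk the control registers from reg1 to reg3 with wraparound modulo 16, so a pair like r1=14, r3=1 touches registers 14, 15, 0, 1. A runnable check of that loop shape, extracted from the handlers above (regs_touched is our name):

    #include <assert.h>

    /* Count the control registers an lctl/stctl-style loop visits,
     * including the wraparound case where reg1 > reg3. */
    static int regs_touched(int reg1, int reg3)
    {
            int reg = reg1, n = 0;

            do {
                    n++;
                    if (reg == reg3)
                            break;
                    reg = (reg + 1) % 16;
            } while (1);
            return n;
    }

    int main(void)
    {
            assert(regs_touched(0, 0) == 1);
            assert(regs_touched(1, 3) == 3);
            assert(regs_touched(14, 1) == 4);       /* 14, 15, 0, 1 */
            return 0;
    }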
@@ -731,7 +829,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 {
         int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
         int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-        u64 useraddr;
+        u64 ga, val;
         int reg, rc;
 
         vcpu->stat.instruction_lctlg++;
@@ -739,23 +837,58 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-        useraddr = kvm_s390_get_base_disp_rsy(vcpu);
+        ga = kvm_s390_get_base_disp_rsy(vcpu);
 
-        if (useraddr & 7)
+        if (ga & 7)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
         reg = reg1;
 
-        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
-                   useraddr);
-        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
+        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
 
         do {
-                rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
-                               (u64 __user *) useraddr);
+                rc = read_guest(vcpu, ga, &val, sizeof(val));
                 if (rc)
-                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-                useraddr += 8;
+                        return kvm_s390_inject_prog_cond(vcpu, rc);
+                vcpu->arch.sie_block->gcr[reg] = val;
+                ga += 8;
+                if (reg == reg3)
+                        break;
+                reg = (reg + 1) % 16;
+        } while (1);
+
+        return 0;
+}
+
+static int handle_stctg(struct kvm_vcpu *vcpu)
+{
+        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+        u64 ga, val;
+        int reg, rc;
+
+        vcpu->stat.instruction_stctg++;
+
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+        ga = kvm_s390_get_base_disp_rsy(vcpu);
+
+        if (ga & 7)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+        reg = reg1;
+
+        VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+        trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
+
+        do {
+                val = vcpu->arch.sie_block->gcr[reg];
+                rc = write_guest(vcpu, ga, &val, sizeof(val));
+                if (rc)
+                        return kvm_s390_inject_prog_cond(vcpu, rc);
+                ga += 8;
                 if (reg == reg3)
                         break;
                 reg = (reg + 1) % 16;
@@ -766,6 +899,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 
 static const intercept_handler_t eb_handlers[256] = {
         [0x2f] = handle_lctlg,
+        [0x25] = handle_stctg,
 };
 
 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)