Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kvm.h        11
-rw-r--r--  arch/s390/include/asm/kvm_host.h   12
-rw-r--r--  arch/s390/kvm/Kconfig               9
-rw-r--r--  arch/s390/kvm/diag.c                6
-rw-r--r--  arch/s390/kvm/intercept.c          24
-rw-r--r--  arch/s390/kvm/interrupt.c           3
-rw-r--r--  arch/s390/kvm/kvm-s390.c          221
-rw-r--r--  arch/s390/kvm/kvm-s390.h           18
-rw-r--r--  arch/s390/kvm/priv.c               27
-rw-r--r--  arch/s390/kvm/sigp.c               57
10 files changed, 305 insertions, 83 deletions
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index 82b32a100c7..96076676e22 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -41,4 +41,15 @@ struct kvm_debug_exit_arch {
 struct kvm_guest_debug_arch {
 };
 
+#define KVM_SYNC_PREFIX (1UL << 0)
+#define KVM_SYNC_GPRS   (1UL << 1)
+#define KVM_SYNC_ACRS   (1UL << 2)
+#define KVM_SYNC_CRS    (1UL << 3)
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+	__u64 prefix;	/* prefix register */
+	__u64 gprs[16];	/* general purpose registers */
+	__u32 acrs[16];	/* access registers */
+	__u64 crs[16];	/* control registers */
+};
 #endif
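
The kvm_sync_regs layout and KVM_SYNC_* bits above are consumed through the mmap'ed kvm_run page rather than through separate ioctls. A minimal userspace sketch of the intended flow, assuming a vcpu_fd and /dev/kvm fd set up in the usual way; run->s.regs, kvm_valid_regs and kvm_dirty_regs are the interface used by this patch, everything else is standard KVM API:

/* illustrative sketch, not part of the patch */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void set_prefix_via_run_page(int kvm_fd, int vcpu_fd, __u32 prefix)
{
	int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	/* the kernel announces which registers it mirrors here; see
	 * kvm_arch_vcpu_init() below setting kvm_valid_regs */
	if (run == MAP_FAILED || !(run->kvm_valid_regs & KVM_SYNC_PREFIX))
		return;

	/* write the register and flag it dirty; kvm_arch_vcpu_ioctl_run()
	 * picks it up on the next entry, no KVM_SET_SREGS round trip */
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
	ioctl(vcpu_fd, KVM_RUN, 0);
}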
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index b0c235cb6ad..7343872890a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -220,18 +220,17 @@ struct kvm_s390_float_interrupt {
 	struct list_head list;
 	atomic_t active;
 	int next_rr_cpu;
-	unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
-	struct kvm_s390_local_interrupt *local_int[64];
+	unsigned long idle_mask[(KVM_MAX_VCPUS + sizeof(long) - 1)
+				/ sizeof(long)];
+	struct kvm_s390_local_interrupt *local_int[KVM_MAX_VCPUS];
 };
 
 
 struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
-	unsigned long guest_gprs[16];
 	s390_fp_regs host_fpregs;
 	unsigned int host_acrs[NUM_ACRS];
 	s390_fp_regs guest_fpregs;
-	unsigned int guest_acrs[NUM_ACRS];
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer ckc_timer;
 	struct tasklet_struct tasklet;
@@ -246,6 +245,9 @@ struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch{
 	struct sca_block *sca;
 	debug_info_t *dbf;
@@ -253,5 +255,5 @@ struct kvm_arch{
 	struct gmap *gmap;
 };
 
-extern int sie64a(struct kvm_s390_sie_block *, unsigned long *);
+extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 #endif
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index a21634173a6..78eb9847008 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -34,6 +34,15 @@ config KVM
 
 	  If unsure, say N.
 
+config KVM_S390_UCONTROL
+	bool "Userspace controlled virtual machines"
+	depends on KVM
+	---help---
+	  Allow CAP_SYS_ADMIN users to create KVM virtual machines that are
+	  controlled by userspace.
+
+	  If unsure, say N.
+
 # OK, it's a little counter-intuitive to do this, but it puts it neatly under
 # the virtualization menu.
 source drivers/vhost/Kconfig
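
The new KVM_S390_UCONTROL option gates user-controlled VMs behind CAP_SYS_ADMIN; the type check lives in kvm_arch_init_vm() further down. A hedged sketch of how userspace would request such a VM (standard KVM API apart from the KVM_VM_S390_UCONTROL type introduced here):

/* illustrative sketch, not part of the patch */
#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

int create_ucontrol_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return -1;
	/* the type argument reaches kvm_arch_init_vm(); without
	 * CAP_SYS_ADMIN (or without this config option) it fails */
	return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
}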
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 8943e82cd4d..a353f0ea45c 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -20,8 +20,8 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 	unsigned long start, end;
 	unsigned long prefix  = vcpu->arch.sie_block->prefix;
 
-	start = vcpu->arch.guest_gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
-	end = vcpu->arch.guest_gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
+	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
+	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
 
 	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end
 	    || start < 2 * PAGE_SIZE)
@@ -56,7 +56,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 {
 	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
-	unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff;
+	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
 
 	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
 	switch (subcode) {
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 02434543eab..361456577c6 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -36,7 +36,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 
 	useraddr = disp2;
 	if (base2)
-		useraddr += vcpu->arch.guest_gprs[base2];
+		useraddr += vcpu->run->s.regs.gprs[base2];
 
 	if (useraddr & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -75,7 +75,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 
 	useraddr = disp2;
 	if (base2)
-		useraddr += vcpu->arch.guest_gprs[base2];
+		useraddr += vcpu->run->s.regs.gprs[base2];
 
 	if (useraddr & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -133,13 +133,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.exit_stop_request++;
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
-		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
-		rc = kvm_s390_vcpu_store_status(vcpu,
-						KVM_S390_STORE_STATUS_NOADDR);
-		if (rc >= 0)
-			rc = -EOPNOTSUPP;
-	}
 
 	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
 		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
@@ -155,7 +148,18 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 		rc = -EOPNOTSUPP;
 	}
 
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
+		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
+		/* store status must be called unlocked. Since local_int.lock
+		 * only protects local_int.* and not guest memory we can give
+		 * up the lock here */
+		spin_unlock_bh(&vcpu->arch.local_int.lock);
+		rc = kvm_s390_vcpu_store_status(vcpu,
+						KVM_S390_STORE_STATUS_NOADDR);
+		if (rc >= 0)
+			rc = -EOPNOTSUPP;
+	} else
+		spin_unlock_bh(&vcpu->arch.local_int.lock);
 	return rc;
 }
 
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f0647ce6da2..2d9f9a72bb8 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -236,8 +236,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
 			   inti->prefix.address);
 		vcpu->stat.deliver_prefix_signal++;
-		vcpu->arch.sie_block->prefix = inti->prefix.address;
-		vcpu->arch.sie_block->ihcpu = 0xffff;
+		kvm_s390_set_prefix(vcpu, inti->prefix.address);
 		break;
 
 	case KVM_S390_RESTART:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d1c44573245..17ad69d596f 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -129,6 +129,10 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_S390_PSW:
 	case KVM_CAP_S390_GMAP:
 	case KVM_CAP_SYNC_MMU:
+#ifdef CONFIG_KVM_S390_UCONTROL
+	case KVM_CAP_S390_UCONTROL:
+#endif
+	case KVM_CAP_SYNC_REGS:
 		r = 1;
 		break;
 	default:
@@ -171,11 +175,22 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	return r;
 }
 
-int kvm_arch_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	int rc;
 	char debug_name[16];
 
+	rc = -EINVAL;
+#ifdef CONFIG_KVM_S390_UCONTROL
+	if (type & ~KVM_VM_S390_UCONTROL)
+		goto out_err;
+	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
+		goto out_err;
+#else
+	if (type)
+		goto out_err;
+#endif
+
 	rc = s390_enable_sie();
 	if (rc)
 		goto out_err;
@@ -198,10 +213,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
 	VM_EVENT(kvm, 3, "%s", "vm created");
 
-	kvm->arch.gmap = gmap_alloc(current->mm);
-	if (!kvm->arch.gmap)
-		goto out_nogmap;
-
+	if (type & KVM_VM_S390_UCONTROL) {
+		kvm->arch.gmap = NULL;
+	} else {
+		kvm->arch.gmap = gmap_alloc(current->mm);
+		if (!kvm->arch.gmap)
+			goto out_nogmap;
+	}
 	return 0;
 out_nogmap:
 	debug_unregister(kvm->arch.dbf);
@@ -214,11 +232,18 @@ out_err:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
-	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
-	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
-		(__u64) vcpu->arch.sie_block)
-		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+	if (!kvm_is_ucontrol(vcpu->kvm)) {
+		clear_bit(63 - vcpu->vcpu_id,
+			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
+		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
+		    (__u64) vcpu->arch.sie_block)
+			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+	}
 	smp_mb();
+
+	if (kvm_is_ucontrol(vcpu->kvm))
+		gmap_free(vcpu->arch.gmap);
+
 	free_page((unsigned long)(vcpu->arch.sie_block));
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu);
@@ -249,13 +274,25 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_free_vcpus(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
-	gmap_free(kvm->arch.gmap);
+	if (!kvm_is_ucontrol(kvm))
+		gmap_free(kvm->arch.gmap);
 }
 
 /* Section: vcpu related */
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+	if (kvm_is_ucontrol(vcpu->kvm)) {
+		vcpu->arch.gmap = gmap_alloc(current->mm);
+		if (!vcpu->arch.gmap)
+			return -ENOMEM;
+		return 0;
+	}
+
 	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
+	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
+				    KVM_SYNC_GPRS |
+				    KVM_SYNC_ACRS |
+				    KVM_SYNC_CRS;
 	return 0;
 }
 
@@ -270,7 +307,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	save_access_regs(vcpu->arch.host_acrs);
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
-	restore_access_regs(vcpu->arch.guest_acrs);
+	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
@@ -280,7 +317,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
-	save_access_regs(vcpu->arch.guest_acrs);
+	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_fp_regs(&vcpu->arch.host_fpregs);
 	restore_access_regs(vcpu->arch.host_acrs);
 }
@@ -290,8 +327,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
 	vcpu->arch.sie_block->gpsw.mask = 0UL;
 	vcpu->arch.sie_block->gpsw.addr = 0UL;
-	vcpu->arch.sie_block->prefix = 0UL;
-	vcpu->arch.sie_block->ihcpu = 0xffff;
+	kvm_s390_set_prefix(vcpu, 0);
 	vcpu->arch.sie_block->cputm = 0UL;
 	vcpu->arch.sie_block->ckc = 0UL;
 	vcpu->arch.sie_block->todpr = 0;
@@ -342,12 +378,19 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto out_free_cpu;
 
 	vcpu->arch.sie_block->icpua = id;
-	BUG_ON(!kvm->arch.sca);
-	if (!kvm->arch.sca->cpu[id].sda)
-		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
-	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
-	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
-	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
+	if (!kvm_is_ucontrol(kvm)) {
+		if (!kvm->arch.sca) {
+			WARN_ON_ONCE(1);
+			goto out_free_cpu;
+		}
+		if (!kvm->arch.sca->cpu[id].sda)
+			kvm->arch.sca->cpu[id].sda =
+				(__u64) vcpu->arch.sie_block;
+		vcpu->arch.sie_block->scaoh =
+			(__u32)(((__u64)kvm->arch.sca) >> 32);
+		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
+		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
+	}
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
@@ -388,29 +431,29 @@ static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
+	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
+	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
+	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
-	restore_access_regs(vcpu->arch.guest_acrs);
+	restore_access_regs(vcpu->run->s.regs.acrs);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
+	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
 	return 0;
 }
@@ -418,7 +461,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	return 0;
 }
@@ -467,9 +510,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
-static void __vcpu_run(struct kvm_vcpu *vcpu)
+static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
-	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
+	int rc;
+
+	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
 	if (need_resched())
 		schedule();
@@ -477,7 +522,8 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_thread_flag(TIF_MCCK_PENDING))
 		s390_handle_mcck();
 
-	kvm_s390_deliver_pending_interrupts(vcpu);
+	if (!kvm_is_ucontrol(vcpu->kvm))
+		kvm_s390_deliver_pending_interrupts(vcpu);
 
 	vcpu->arch.sie_block->icptcode = 0;
 	local_irq_disable();
@@ -485,9 +531,15 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
-		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+	if (rc) {
+		if (kvm_is_ucontrol(vcpu->kvm)) {
+			rc = SIE_INTERCEPT_UCONTROL;
+		} else {
+			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+			rc = 0;
+		}
 	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
@@ -495,7 +547,8 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	kvm_guest_exit();
 	local_irq_enable();
 
-	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
+	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+	return rc;
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -516,6 +569,7 @@ rerun_vcpu:
 	case KVM_EXIT_UNKNOWN:
 	case KVM_EXIT_INTR:
 	case KVM_EXIT_S390_RESET:
+	case KVM_EXIT_S390_UCONTROL:
 		break;
 	default:
 		BUG();
@@ -523,12 +577,26 @@ rerun_vcpu:
 
 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
+		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
+		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+	}
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
+		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
+		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
+		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+	}
 
 	might_fault();
 
 	do {
-		__vcpu_run(vcpu);
-		rc = kvm_handle_sie_intercept(vcpu);
+		rc = __vcpu_run(vcpu);
+		if (rc)
+			break;
+		if (kvm_is_ucontrol(vcpu->kvm))
+			rc = -EOPNOTSUPP;
+		else
+			rc = kvm_handle_sie_intercept(vcpu);
 	} while (!signal_pending(current) && !rc);
 
 	if (rc == SIE_INTERCEPT_RERUNVCPU)
@@ -539,6 +607,16 @@ rerun_vcpu:
 		rc = -EINTR;
 	}
 
+#ifdef CONFIG_KVM_S390_UCONTROL
+	if (rc == SIE_INTERCEPT_UCONTROL) {
+		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
+		kvm_run->s390_ucontrol.trans_exc_code =
+						current->thread.gmap_addr;
+		kvm_run->s390_ucontrol.pgm_code = 0x10;
+		rc = 0;
+	}
+#endif
+
 	if (rc == -EOPNOTSUPP) {
 		/* intercept cannot be handled in-kernel, prepare kvm-run */
 		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
@@ -556,6 +634,8 @@ rerun_vcpu:
 
 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
+	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
+	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -602,7 +682,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 		return -EFAULT;
 
 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
-			vcpu->arch.guest_gprs, 128, prefix))
+			vcpu->run->s.regs.gprs, 128, prefix))
 		return -EFAULT;
 
 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
@@ -631,7 +711,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 		return -EFAULT;
 
 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
-			&vcpu->arch.guest_acrs, 64, prefix))
+			&vcpu->run->s.regs.acrs, 64, prefix))
 		return -EFAULT;
 
 	if (__guestcopy(vcpu,
@@ -673,12 +753,77 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	case KVM_S390_INITIAL_RESET:
 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
 		break;
+#ifdef CONFIG_KVM_S390_UCONTROL
+	case KVM_S390_UCAS_MAP: {
+		struct kvm_s390_ucas_mapping ucasmap;
+
+		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
+			r = -EFAULT;
+			break;
+		}
+
+		if (!kvm_is_ucontrol(vcpu->kvm)) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
+				     ucasmap.vcpu_addr, ucasmap.length);
+		break;
+	}
+	case KVM_S390_UCAS_UNMAP: {
+		struct kvm_s390_ucas_mapping ucasmap;
+
+		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
+			r = -EFAULT;
+			break;
+		}
+
+		if (!kvm_is_ucontrol(vcpu->kvm)) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
+				       ucasmap.length);
+		break;
+	}
+#endif
+	case KVM_S390_VCPU_FAULT: {
+		r = gmap_fault(arg, vcpu->arch.gmap);
+		if (!IS_ERR_VALUE(r))
+			r = 0;
+		break;
+	}
 	default:
-		r = -EINVAL;
+		r = -ENOTTY;
 	}
 	return r;
 }
 
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+#ifdef CONFIG_KVM_S390_UCONTROL
+	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
+		 && (kvm_is_ucontrol(vcpu->kvm))) {
+		vmf->page = virt_to_page(vcpu->arch.sie_block);
+		get_page(vmf->page);
+		return 0;
+	}
+#endif
+	return VM_FAULT_SIGBUS;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	return 0;
+}
+
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
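
For the new KVM_S390_UCAS_MAP/KVM_S390_UCAS_UNMAP ioctls above, userspace supplies a struct kvm_s390_ucas_mapping. A sketch of backing one guest segment with anonymous memory, assuming the vcpu belongs to a KVM_VM_S390_UCONTROL VM and that gmap mappings are made at 1 MB segment granularity:

/* illustrative sketch, not part of the patch */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#define SEG_SIZE (1UL << 20)	/* assumed 1 MB s390 segment */

static int back_guest_segment(int vcpu_fd, __u64 guest_addr)
{
	struct kvm_s390_ucas_mapping map;
	void *mem = mmap(NULL, SEG_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (mem == MAP_FAILED)
		return -1;
	map.user_addr = (__u64)(unsigned long)mem;
	map.vcpu_addr = guest_addr;		/* guest address to back */
	map.length    = SEG_SIZE;
	/* dispatched to gmap_map_segment() in kvm_arch_vcpu_ioctl() */
	return ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
}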
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 99b0b759711..ff28f9d1c9e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -26,6 +26,7 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 
 /* negativ values are error codes, positive values for internal conditions */
 #define SIE_INTERCEPT_RERUNVCPU		(1<<0)
+#define SIE_INTERCEPT_UCONTROL		(1<<1)
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
 
 #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
@@ -47,6 +48,23 @@ static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
 }
 
+static inline int kvm_is_ucontrol(struct kvm *kvm)
+{
+#ifdef CONFIG_KVM_S390_UCONTROL
+	if (kvm->arch.gmap)
+		return 0;
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
+{
+	vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u;
+	vcpu->arch.sie_block->ihcpu  = 0xffff;
+}
+
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
 void kvm_s390_tasklet(unsigned long parm);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d0263895992..e5a45dbd26a 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -33,7 +33,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 
 	operand2 = disp2;
 	if (base2)
-		operand2 += vcpu->arch.guest_gprs[base2];
+		operand2 += vcpu->run->s.regs.gprs[base2];
 
 	/* must be word boundary */
 	if (operand2 & 3) {
@@ -56,8 +56,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	vcpu->arch.sie_block->prefix = address;
-	vcpu->arch.sie_block->ihcpu = 0xffff;
+	kvm_s390_set_prefix(vcpu, address);
 
 	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
 out:
@@ -74,7 +73,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	vcpu->stat.instruction_stpx++;
 	operand2 = disp2;
 	if (base2)
-		operand2 += vcpu->arch.guest_gprs[base2];
+		operand2 += vcpu->run->s.regs.gprs[base2];
 
 	/* must be word boundary */
 	if (operand2 & 3) {
@@ -106,7 +105,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	vcpu->stat.instruction_stap++;
 	useraddr = disp2;
 	if (base2)
-		useraddr += vcpu->arch.guest_gprs[base2];
+		useraddr += vcpu->run->s.regs.gprs[base2];
 
 	if (useraddr & 1) {
 		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -181,7 +180,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	vcpu->stat.instruction_stidp++;
 	operand2 = disp2;
 	if (base2)
-		operand2 += vcpu->arch.guest_gprs[base2];
+		operand2 += vcpu->run->s.regs.gprs[base2];
 
 	if (operand2 & 7) {
 		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -232,9 +231,9 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 
 static int handle_stsi(struct kvm_vcpu *vcpu)
 {
-	int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28;
-	int sel1 = vcpu->arch.guest_gprs[0] & 0xff;
-	int sel2 = vcpu->arch.guest_gprs[1] & 0xffff;
+	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
+	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
+	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
 	int base2 = vcpu->arch.sie_block->ipb >> 28;
 	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u64 operand2;
@@ -245,14 +244,14 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 
 	operand2 = disp2;
 	if (base2)
-		operand2 += vcpu->arch.guest_gprs[base2];
+		operand2 += vcpu->run->s.regs.gprs[base2];
 
 	if (operand2 & 0xfff && fc > 0)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	switch (fc) {
 	case 0:
-		vcpu->arch.guest_gprs[0] = 3 << 28;
+		vcpu->run->s.regs.gprs[0] = 3 << 28;
 		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 		return 0;
 	case 1: /* same handling for 1 and 2 */
@@ -281,7 +280,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	}
 	free_page(mem);
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
-	vcpu->arch.guest_gprs[0] = 0;
+	vcpu->run->s.regs.gprs[0] = 0;
 	return 0;
 out_mem:
 	free_page(mem);
@@ -333,8 +332,8 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
 	int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
 	int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
-	u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
-	u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
+	u64 address1 = disp1 + base1 ? vcpu->run->s.regs.gprs[base1] : 0;
+	u64 address2 = disp2 + base2 ? vcpu->run->s.regs.gprs[base2] : 0;
 	struct vm_area_struct *vma;
 	unsigned long user_address;
 
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 0a7941d74bc..0ad4cf23839 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -48,7 +48,7 @@
 
 
 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
-			unsigned long *reg)
+			u64 *reg)
 {
 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int rc;
@@ -160,12 +160,15 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 	inti->type = KVM_S390_SIGP_STOP;
 
 	spin_lock_bh(&li->lock);
+	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
+		goto out;
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
 	li->action_bits |= action;
 	if (waitqueue_active(&li->wq))
 		wake_up_interruptible(&li->wq);
+out:
 	spin_unlock_bh(&li->lock);
 
 	return 0; /* order accepted */
@@ -220,7 +223,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 }
 
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
-			     unsigned long *reg)
+			     u64 *reg)
 {
 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li = NULL;
@@ -278,7 +281,7 @@ out_fi:
 }
 
 static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
-				unsigned long *reg)
+				u64 *reg)
 {
 	int rc;
 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
@@ -309,6 +312,34 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	return rc;
 }
 
+static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
+{
+	int rc = 0;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+
+	if (cpu_addr >= KVM_MAX_VCPUS)
+		return 3; /* not operational */
+
+	spin_lock(&fi->lock);
+	li = fi->local_int[cpu_addr];
+	if (li == NULL) {
+		rc = 3; /* not operational */
+		goto out;
+	}
+
+	spin_lock_bh(&li->lock);
+	if (li->action_bits & ACTION_STOP_ON_STOP)
+		rc = 2; /* busy */
+	else
+		VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
+			cpu_addr);
+	spin_unlock_bh(&li->lock);
+out:
+	spin_unlock(&fi->lock);
+	return rc;
+}
+
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -316,7 +347,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 	int base2 = vcpu->arch.sie_block->ipb >> 28;
 	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u32 parameter;
-	u16 cpu_addr = vcpu->arch.guest_gprs[r3];
+	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
 	u8 order_code;
 	int rc;
 
@@ -327,18 +358,18 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 
 	order_code = disp2;
 	if (base2)
-		order_code += vcpu->arch.guest_gprs[base2];
+		order_code += vcpu->run->s.regs.gprs[base2];
 
 	if (r1 % 2)
-		parameter = vcpu->arch.guest_gprs[r1];
+		parameter = vcpu->run->s.regs.gprs[r1];
 	else
-		parameter = vcpu->arch.guest_gprs[r1 + 1];
+		parameter = vcpu->run->s.regs.gprs[r1 + 1];
 
 	switch (order_code) {
 	case SIGP_SENSE:
 		vcpu->stat.instruction_sigp_sense++;
 		rc = __sigp_sense(vcpu, cpu_addr,
-				  &vcpu->arch.guest_gprs[r1]);
+				  &vcpu->run->s.regs.gprs[r1]);
 		break;
 	case SIGP_EXTERNAL_CALL:
 		vcpu->stat.instruction_sigp_external_call++;
@@ -354,7 +385,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		break;
 	case SIGP_STOP_STORE_STATUS:
 		vcpu->stat.instruction_sigp_stop++;
-		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP);
+		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
+						 ACTION_STOP_ON_STOP);
 		break;
 	case SIGP_SET_ARCH:
 		vcpu->stat.instruction_sigp_arch++;
@@ -363,15 +395,18 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 	case SIGP_SET_PREFIX:
 		vcpu->stat.instruction_sigp_prefix++;
 		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
-				       &vcpu->arch.guest_gprs[r1]);
+				       &vcpu->run->s.regs.gprs[r1]);
 		break;
 	case SIGP_SENSE_RUNNING:
 		vcpu->stat.instruction_sigp_sense_running++;
 		rc = __sigp_sense_running(vcpu, cpu_addr,
-					  &vcpu->arch.guest_gprs[r1]);
+					  &vcpu->run->s.regs.gprs[r1]);
 		break;
 	case SIGP_RESTART:
 		vcpu->stat.instruction_sigp_restart++;
+		rc = __sigp_restart(vcpu, cpu_addr);
+		if (rc == 2) /* busy */
+			break;
 		/* user space must know about restart */
 	default:
 		return -EOPNOTSUPP;