Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
 arch/s390/kvm/kvm-s390.c | 596
 1 file changed, 536 insertions(+), 60 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3e09801e3104..0c3623927563 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/kvm.h> | 22 | #include <linux/kvm.h> |
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/random.h> | ||
25 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
26 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
27 | #include <asm/asm-offsets.h> | 28 | #include <asm/asm-offsets.h> |
@@ -29,7 +30,6 @@ | |||
29 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
30 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
31 | #include <asm/switch_to.h> | 32 | #include <asm/switch_to.h> |
32 | #include <asm/facility.h> | ||
33 | #include <asm/sclp.h> | 33 | #include <asm/sclp.h> |
34 | #include "kvm-s390.h" | 34 | #include "kvm-s390.h" |
35 | #include "gaccess.h" | 35 | #include "gaccess.h" |
@@ -50,6 +50,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
50 | { "exit_instruction", VCPU_STAT(exit_instruction) }, | 50 | { "exit_instruction", VCPU_STAT(exit_instruction) }, |
51 | { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, | 51 | { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, |
52 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, | 52 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, |
53 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, | ||
53 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 54 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
54 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, | 55 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, |
55 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, | 56 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, |
@@ -98,15 +99,20 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
98 | { NULL } | 99 | { NULL } |
99 | }; | 100 | }; |
100 | 101 | ||
101 | unsigned long *vfacilities; | 102 | /* upper facilities limit for kvm */ |
102 | static struct gmap_notifier gmap_notifier; | 103 | unsigned long kvm_s390_fac_list_mask[] = { |
104 | 0xff82fffbf4fc2000UL, | ||
105 | 0x005c000000000000UL, | ||
106 | }; | ||
103 | 107 | ||
104 | /* test availability of vfacility */ | 108 | unsigned long kvm_s390_fac_list_mask_size(void) |
105 | int test_vfacility(unsigned long nr) | ||
106 | { | 109 | { |
107 | return __test_facility(nr, (void *) vfacilities); | 110 | BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64); |
111 | return ARRAY_SIZE(kvm_s390_fac_list_mask); | ||
108 | } | 112 | } |
109 | 113 | ||
114 | static struct gmap_notifier gmap_notifier; | ||
115 | |||
110 | /* Section: not file related */ | 116 | /* Section: not file related */ |
111 | int kvm_arch_hardware_enable(void) | 117 | int kvm_arch_hardware_enable(void) |
112 | { | 118 | { |
@@ -166,6 +172,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
166 | case KVM_CAP_S390_IRQCHIP: | 172 | case KVM_CAP_S390_IRQCHIP: |
167 | case KVM_CAP_VM_ATTRIBUTES: | 173 | case KVM_CAP_VM_ATTRIBUTES: |
168 | case KVM_CAP_MP_STATE: | 174 | case KVM_CAP_MP_STATE: |
175 | case KVM_CAP_S390_USER_SIGP: | ||
169 | r = 1; | 176 | r = 1; |
170 | break; | 177 | break; |
171 | case KVM_CAP_NR_VCPUS: | 178 | case KVM_CAP_NR_VCPUS: |
@@ -254,6 +261,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
254 | kvm->arch.use_irqchip = 1; | 261 | kvm->arch.use_irqchip = 1; |
255 | r = 0; | 262 | r = 0; |
256 | break; | 263 | break; |
264 | case KVM_CAP_S390_USER_SIGP: | ||
265 | kvm->arch.user_sigp = 1; | ||
266 | r = 0; | ||
267 | break; | ||
257 | default: | 268 | default: |
258 | r = -EINVAL; | 269 | r = -EINVAL; |
259 | break; | 270 | break; |
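Editor's note: the KVM_CAP_S390_USER_SIGP capability advertised and enabled above makes the kernel hand SIGP orders that need a policy decision to userspace instead of completing them itself. A minimal userspace sketch of opting in, reusing the VM-level KVM_ENABLE_CAP path that reaches kvm_vm_ioctl_enable_cap() in this file; the capability number is assumed to come from this series' uapi update, and enable_user_sigp() is a made-up helper name.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Opt the whole VM in to userspace SIGP handling. */
static int enable_user_sigp(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_USER_SIGP,
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}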
@@ -261,7 +272,24 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
261 | return r; | 272 | return r; |
262 | } | 273 | } |
263 | 274 | ||
264 | static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) | 275 | static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) |
276 | { | ||
277 | int ret; | ||
278 | |||
279 | switch (attr->attr) { | ||
280 | case KVM_S390_VM_MEM_LIMIT_SIZE: | ||
281 | ret = 0; | ||
282 | if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) | ||
283 | ret = -EFAULT; | ||
284 | break; | ||
285 | default: | ||
286 | ret = -ENXIO; | ||
287 | break; | ||
288 | } | ||
289 | return ret; | ||
290 | } | ||
291 | |||
292 | static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) | ||
265 | { | 293 | { |
266 | int ret; | 294 | int ret; |
267 | unsigned int idx; | 295 | unsigned int idx; |
@@ -283,6 +311,36 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) | |||
283 | mutex_unlock(&kvm->lock); | 311 | mutex_unlock(&kvm->lock); |
284 | ret = 0; | 312 | ret = 0; |
285 | break; | 313 | break; |
314 | case KVM_S390_VM_MEM_LIMIT_SIZE: { | ||
315 | unsigned long new_limit; | ||
316 | |||
317 | if (kvm_is_ucontrol(kvm)) | ||
318 | return -EINVAL; | ||
319 | |||
320 | if (get_user(new_limit, (u64 __user *)attr->addr)) | ||
321 | return -EFAULT; | ||
322 | |||
323 | if (new_limit > kvm->arch.gmap->asce_end) | ||
324 | return -E2BIG; | ||
325 | |||
326 | ret = -EBUSY; | ||
327 | mutex_lock(&kvm->lock); | ||
328 | if (atomic_read(&kvm->online_vcpus) == 0) { | ||
329 | /* gmap_alloc will round the limit up */ | ||
330 | struct gmap *new = gmap_alloc(current->mm, new_limit); | ||
331 | |||
332 | if (!new) { | ||
333 | ret = -ENOMEM; | ||
334 | } else { | ||
335 | gmap_free(kvm->arch.gmap); | ||
336 | new->private = kvm; | ||
337 | kvm->arch.gmap = new; | ||
338 | ret = 0; | ||
339 | } | ||
340 | } | ||
341 | mutex_unlock(&kvm->lock); | ||
342 | break; | ||
343 | } | ||
286 | default: | 344 | default: |
287 | ret = -ENXIO; | 345 | ret = -ENXIO; |
288 | break; | 346 | break; |
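Editor's note: kvm_s390_set_mem_control() above lets userspace shrink the guest address-space limit as long as no VCPU exists; gmap_alloc() rounds the requested value up. A minimal sketch of driving it from userspace, assuming the KVM_S390_VM_MEM_CTRL / KVM_S390_VM_MEM_LIMIT_SIZE constants from this series' uapi header; set_guest_mem_limit() is a made-up helper.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int set_guest_mem_limit(int vm_fd, uint64_t new_limit)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr  = (uint64_t)&new_limit,	/* kernel get_user()s a u64 from here */
	};

	/* Fails with EBUSY once a VCPU exists, E2BIG above the current asce_end. */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}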
@@ -290,13 +348,276 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) | |||
290 | return ret; | 348 | return ret; |
291 | } | 349 | } |
292 | 350 | ||
351 | static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu); | ||
352 | |||
353 | static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) | ||
354 | { | ||
355 | struct kvm_vcpu *vcpu; | ||
356 | int i; | ||
357 | |||
358 | if (!test_kvm_facility(kvm, 76)) | ||
359 | return -EINVAL; | ||
360 | |||
361 | mutex_lock(&kvm->lock); | ||
362 | switch (attr->attr) { | ||
363 | case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: | ||
364 | get_random_bytes( | ||
365 | kvm->arch.crypto.crycb->aes_wrapping_key_mask, | ||
366 | sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); | ||
367 | kvm->arch.crypto.aes_kw = 1; | ||
368 | break; | ||
369 | case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: | ||
370 | get_random_bytes( | ||
371 | kvm->arch.crypto.crycb->dea_wrapping_key_mask, | ||
372 | sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); | ||
373 | kvm->arch.crypto.dea_kw = 1; | ||
374 | break; | ||
375 | case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: | ||
376 | kvm->arch.crypto.aes_kw = 0; | ||
377 | memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, | ||
378 | sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); | ||
379 | break; | ||
380 | case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: | ||
381 | kvm->arch.crypto.dea_kw = 0; | ||
382 | memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, | ||
383 | sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); | ||
384 | break; | ||
385 | default: | ||
386 | mutex_unlock(&kvm->lock); | ||
387 | return -ENXIO; | ||
388 | } | ||
389 | |||
390 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
391 | kvm_s390_vcpu_crypto_setup(vcpu); | ||
392 | exit_sie(vcpu); | ||
393 | } | ||
394 | mutex_unlock(&kvm->lock); | ||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) | ||
399 | { | ||
400 | u8 gtod_high; | ||
401 | |||
402 | if (copy_from_user(&gtod_high, (void __user *)attr->addr, ||
403 | sizeof(gtod_high))) | ||
404 | return -EFAULT; | ||
405 | |||
406 | if (gtod_high != 0) | ||
407 | return -EINVAL; | ||
408 | |||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) | ||
413 | { | ||
414 | struct kvm_vcpu *cur_vcpu; | ||
415 | unsigned int vcpu_idx; | ||
416 | u64 host_tod, gtod; | ||
417 | int r; | ||
418 | |||
419 | if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) ||
420 | return -EFAULT; | ||
421 | |||
422 | r = store_tod_clock(&host_tod); | ||
423 | if (r) | ||
424 | return r; | ||
425 | |||
426 | mutex_lock(&kvm->lock); | ||
427 | kvm->arch.epoch = gtod - host_tod; | ||
428 | kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) { | ||
429 | cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; | ||
430 | exit_sie(cur_vcpu); | ||
431 | } | ||
432 | mutex_unlock(&kvm->lock); | ||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) | ||
437 | { | ||
438 | int ret; | ||
439 | |||
440 | if (attr->flags) | ||
441 | return -EINVAL; | ||
442 | |||
443 | switch (attr->attr) { | ||
444 | case KVM_S390_VM_TOD_HIGH: | ||
445 | ret = kvm_s390_set_tod_high(kvm, attr); | ||
446 | break; | ||
447 | case KVM_S390_VM_TOD_LOW: | ||
448 | ret = kvm_s390_set_tod_low(kvm, attr); | ||
449 | break; | ||
450 | default: | ||
451 | ret = -ENXIO; | ||
452 | break; | ||
453 | } | ||
454 | return ret; | ||
455 | } | ||
456 | |||
457 | static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) | ||
458 | { | ||
459 | u8 gtod_high = 0; | ||
460 | |||
461 | if (copy_to_user((void __user *)attr->addr, &gtod_high, ||
462 | sizeof(gtod_high))) | ||
463 | return -EFAULT; | ||
464 | |||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) | ||
469 | { | ||
470 | u64 host_tod, gtod; | ||
471 | int r; | ||
472 | |||
473 | r = store_tod_clock(&host_tod); | ||
474 | if (r) | ||
475 | return r; | ||
476 | |||
477 | gtod = host_tod + kvm->arch.epoch; | ||
478 | if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) ||
479 | return -EFAULT; | ||
480 | |||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) | ||
485 | { | ||
486 | int ret; | ||
487 | |||
488 | if (attr->flags) | ||
489 | return -EINVAL; | ||
490 | |||
491 | switch (attr->attr) { | ||
492 | case KVM_S390_VM_TOD_HIGH: | ||
493 | ret = kvm_s390_get_tod_high(kvm, attr); | ||
494 | break; | ||
495 | case KVM_S390_VM_TOD_LOW: | ||
496 | ret = kvm_s390_get_tod_low(kvm, attr); | ||
497 | break; | ||
498 | default: | ||
499 | ret = -ENXIO; | ||
500 | break; | ||
501 | } | ||
502 | return ret; | ||
503 | } | ||
504 | |||
505 | static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) | ||
506 | { | ||
507 | struct kvm_s390_vm_cpu_processor *proc; | ||
508 | int ret = 0; | ||
509 | |||
510 | mutex_lock(&kvm->lock); | ||
511 | if (atomic_read(&kvm->online_vcpus)) { | ||
512 | ret = -EBUSY; | ||
513 | goto out; | ||
514 | } | ||
515 | proc = kzalloc(sizeof(*proc), GFP_KERNEL); | ||
516 | if (!proc) { | ||
517 | ret = -ENOMEM; | ||
518 | goto out; | ||
519 | } | ||
520 | if (!copy_from_user(proc, (void __user *)attr->addr, | ||
521 | sizeof(*proc))) { | ||
522 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, | ||
523 | sizeof(struct cpuid)); | ||
524 | kvm->arch.model.ibc = proc->ibc; | ||
525 | memcpy(kvm->arch.model.fac->kvm, proc->fac_list, | ||
526 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
527 | } else | ||
528 | ret = -EFAULT; | ||
529 | kfree(proc); | ||
530 | out: | ||
531 | mutex_unlock(&kvm->lock); | ||
532 | return ret; | ||
533 | } | ||
534 | |||
535 | static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) | ||
536 | { | ||
537 | int ret = -ENXIO; | ||
538 | |||
539 | switch (attr->attr) { | ||
540 | case KVM_S390_VM_CPU_PROCESSOR: | ||
541 | ret = kvm_s390_set_processor(kvm, attr); | ||
542 | break; | ||
543 | } | ||
544 | return ret; | ||
545 | } | ||
546 | |||
547 | static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) | ||
548 | { | ||
549 | struct kvm_s390_vm_cpu_processor *proc; | ||
550 | int ret = 0; | ||
551 | |||
552 | proc = kzalloc(sizeof(*proc), GFP_KERNEL); | ||
553 | if (!proc) { | ||
554 | ret = -ENOMEM; | ||
555 | goto out; | ||
556 | } | ||
557 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); | ||
558 | proc->ibc = kvm->arch.model.ibc; | ||
559 | memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
560 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) | ||
561 | ret = -EFAULT; | ||
562 | kfree(proc); | ||
563 | out: | ||
564 | return ret; | ||
565 | } | ||
566 | |||
567 | static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) | ||
568 | { | ||
569 | struct kvm_s390_vm_cpu_machine *mach; | ||
570 | int ret = 0; | ||
571 | |||
572 | mach = kzalloc(sizeof(*mach), GFP_KERNEL); | ||
573 | if (!mach) { | ||
574 | ret = -ENOMEM; | ||
575 | goto out; | ||
576 | } | ||
577 | get_cpu_id((struct cpuid *) &mach->cpuid); | ||
578 | mach->ibc = sclp_get_ibc(); | ||
579 | memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, | ||
580 | kvm_s390_fac_list_mask_size() * sizeof(u64)); | ||
581 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, | ||
582 | S390_ARCH_FAC_LIST_SIZE_U64); | ||
583 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) | ||
584 | ret = -EFAULT; | ||
585 | kfree(mach); | ||
586 | out: | ||
587 | return ret; | ||
588 | } | ||
589 | |||
590 | static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) | ||
591 | { | ||
592 | int ret = -ENXIO; | ||
593 | |||
594 | switch (attr->attr) { | ||
595 | case KVM_S390_VM_CPU_PROCESSOR: | ||
596 | ret = kvm_s390_get_processor(kvm, attr); | ||
597 | break; | ||
598 | case KVM_S390_VM_CPU_MACHINE: | ||
599 | ret = kvm_s390_get_machine(kvm, attr); | ||
600 | break; | ||
601 | } | ||
602 | return ret; | ||
603 | } | ||
604 | |||
293 | static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) | 605 | static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
294 | { | 606 | { |
295 | int ret; | 607 | int ret; |
296 | 608 | ||
297 | switch (attr->group) { | 609 | switch (attr->group) { |
298 | case KVM_S390_VM_MEM_CTRL: | 610 | case KVM_S390_VM_MEM_CTRL: |
299 | ret = kvm_s390_mem_control(kvm, attr); | 611 | ret = kvm_s390_set_mem_control(kvm, attr); |
612 | break; | ||
613 | case KVM_S390_VM_TOD: | ||
614 | ret = kvm_s390_set_tod(kvm, attr); | ||
615 | break; | ||
616 | case KVM_S390_VM_CPU_MODEL: | ||
617 | ret = kvm_s390_set_cpu_model(kvm, attr); | ||
618 | break; | ||
619 | case KVM_S390_VM_CRYPTO: | ||
620 | ret = kvm_s390_vm_set_crypto(kvm, attr); | ||
300 | break; | 621 | break; |
301 | default: | 622 | default: |
302 | ret = -ENXIO; | 623 | ret = -ENXIO; |
@@ -308,7 +629,24 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) | |||
308 | 629 | ||
309 | static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) | 630 | static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
310 | { | 631 | { |
311 | return -ENXIO; | 632 | int ret; |
633 | |||
634 | switch (attr->group) { | ||
635 | case KVM_S390_VM_MEM_CTRL: | ||
636 | ret = kvm_s390_get_mem_control(kvm, attr); | ||
637 | break; | ||
638 | case KVM_S390_VM_TOD: | ||
639 | ret = kvm_s390_get_tod(kvm, attr); | ||
640 | break; | ||
641 | case KVM_S390_VM_CPU_MODEL: | ||
642 | ret = kvm_s390_get_cpu_model(kvm, attr); | ||
643 | break; | ||
644 | default: | ||
645 | ret = -ENXIO; | ||
646 | break; | ||
647 | } | ||
648 | |||
649 | return ret; | ||
312 | } | 650 | } |
313 | 651 | ||
314 | static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) | 652 | static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
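Editor's note: the get side mirrors the set side, so userspace can read the host's machine properties (cpuid, IBC, facility mask and facility list, as filled in by kvm_s390_get_machine() above) before deciding which processor model to present. A sketch of the query, assuming struct kvm_s390_vm_cpu_machine and the KVM_S390_VM_CPU_MODEL constants from the uapi header that accompanies this series; query_machine() is a made-up helper.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int query_machine(int vm_fd, struct kvm_s390_vm_cpu_machine *mach)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_MACHINE,
		.addr  = (uint64_t)mach,	/* kernel copies the whole struct out */
	};

	/* On success, mach->fac_mask holds the kvm-limited facility bits and
	 * mach->fac_list the raw host (STFLE) facility list. */
	return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}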
@@ -320,6 +658,42 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) | |||
320 | switch (attr->attr) { | 658 | switch (attr->attr) { |
321 | case KVM_S390_VM_MEM_ENABLE_CMMA: | 659 | case KVM_S390_VM_MEM_ENABLE_CMMA: |
322 | case KVM_S390_VM_MEM_CLR_CMMA: | 660 | case KVM_S390_VM_MEM_CLR_CMMA: |
661 | case KVM_S390_VM_MEM_LIMIT_SIZE: | ||
662 | ret = 0; | ||
663 | break; | ||
664 | default: | ||
665 | ret = -ENXIO; | ||
666 | break; | ||
667 | } | ||
668 | break; | ||
669 | case KVM_S390_VM_TOD: | ||
670 | switch (attr->attr) { | ||
671 | case KVM_S390_VM_TOD_LOW: | ||
672 | case KVM_S390_VM_TOD_HIGH: | ||
673 | ret = 0; | ||
674 | break; | ||
675 | default: | ||
676 | ret = -ENXIO; | ||
677 | break; | ||
678 | } | ||
679 | break; | ||
680 | case KVM_S390_VM_CPU_MODEL: | ||
681 | switch (attr->attr) { | ||
682 | case KVM_S390_VM_CPU_PROCESSOR: | ||
683 | case KVM_S390_VM_CPU_MACHINE: | ||
684 | ret = 0; | ||
685 | break; | ||
686 | default: | ||
687 | ret = -ENXIO; | ||
688 | break; | ||
689 | } | ||
690 | break; | ||
691 | case KVM_S390_VM_CRYPTO: | ||
692 | switch (attr->attr) { | ||
693 | case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: | ||
694 | case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: | ||
695 | case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: | ||
696 | case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: | ||
323 | ret = 0; | 697 | ret = 0; |
324 | break; | 698 | break; |
325 | default: | 699 | default: |
@@ -401,9 +775,61 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
401 | return r; | 775 | return r; |
402 | } | 776 | } |
403 | 777 | ||
778 | static int kvm_s390_query_ap_config(u8 *config) | ||
779 | { | ||
780 | u32 fcn_code = 0x04000000UL; | ||
781 | u32 cc; | ||
782 | |||
783 | asm volatile( | ||
784 | "lgr 0,%1\n" | ||
785 | "lgr 2,%2\n" | ||
786 | ".long 0xb2af0000\n" /* PQAP(QCI) */ | ||
787 | "ipm %0\n" | ||
788 | "srl %0,28\n" | ||
789 | : "=r" (cc) | ||
790 | : "r" (fcn_code), "r" (config) | ||
791 | : "cc", "0", "2", "memory" | ||
792 | ); | ||
793 | |||
794 | return cc; | ||
795 | } | ||
796 | |||
797 | static int kvm_s390_apxa_installed(void) | ||
798 | { | ||
799 | u8 config[128]; | ||
800 | int cc; | ||
801 | |||
802 | if (test_facility(2) && test_facility(12)) { | ||
803 | cc = kvm_s390_query_ap_config(config); | ||
804 | |||
805 | if (cc) | ||
806 | pr_err("PQAP(QCI) failed with cc=%d", cc); | ||
807 | else | ||
808 | return config[0] & 0x40; | ||
809 | } | ||
810 | |||
811 | return 0; | ||
812 | } | ||
813 | |||
814 | static void kvm_s390_set_crycb_format(struct kvm *kvm) | ||
815 | { | ||
816 | kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; | ||
817 | |||
818 | if (kvm_s390_apxa_installed()) | ||
819 | kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; | ||
820 | else | ||
821 | kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; | ||
822 | } | ||
823 | |||
824 | static void kvm_s390_get_cpu_id(struct cpuid *cpu_id) | ||
825 | { | ||
826 | get_cpu_id(cpu_id); | ||
827 | cpu_id->version = 0xff; | ||
828 | } | ||
829 | |||
404 | static int kvm_s390_crypto_init(struct kvm *kvm) | 830 | static int kvm_s390_crypto_init(struct kvm *kvm) |
405 | { | 831 | { |
406 | if (!test_vfacility(76)) | 832 | if (!test_kvm_facility(kvm, 76)) |
407 | return 0; | 833 | return 0; |
408 | 834 | ||
409 | kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), | 835 | kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), |
@@ -411,15 +837,18 @@ static int kvm_s390_crypto_init(struct kvm *kvm) | |||
411 | if (!kvm->arch.crypto.crycb) | 837 | if (!kvm->arch.crypto.crycb) |
412 | return -ENOMEM; | 838 | return -ENOMEM; |
413 | 839 | ||
414 | kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb | | 840 | kvm_s390_set_crycb_format(kvm); |
415 | CRYCB_FORMAT1; | 841 | |
842 | /* Disable AES/DEA protected key functions by default */ | ||
843 | kvm->arch.crypto.aes_kw = 0; | ||
844 | kvm->arch.crypto.dea_kw = 0; | ||
416 | 845 | ||
417 | return 0; | 846 | return 0; |
418 | } | 847 | } |
419 | 848 | ||
420 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | 849 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
421 | { | 850 | { |
422 | int rc; | 851 | int i, rc; |
423 | char debug_name[16]; | 852 | char debug_name[16]; |
424 | static unsigned long sca_offset; | 853 | static unsigned long sca_offset; |
425 | 854 | ||
@@ -454,6 +883,46 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
454 | if (!kvm->arch.dbf) | 883 | if (!kvm->arch.dbf) |
455 | goto out_nodbf; | 884 | goto out_nodbf; |
456 | 885 | ||
886 | /* | ||
887 | * The architectural maximum amount of facilities is 16 kbit. To store | ||
888 | * this amount, 2 kbyte of memory is required. Thus we need a full | ||
889 | * page to hold the active copy (arch.model.fac->sie) and the current | ||
890 | * facilities set (arch.model.fac->kvm). Its address size has to be | ||
891 | * 31 bits and word aligned. | ||
892 | */ | ||
893 | kvm->arch.model.fac = | ||
894 | (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
895 | if (!kvm->arch.model.fac) | ||
896 | goto out_nofac; | ||
897 | |||
898 | memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, | ||
899 | S390_ARCH_FAC_LIST_SIZE_U64); | ||
900 | |||
901 | /* | ||
902 | * If this KVM host runs *not* in a LPAR, relax the facility bits | ||
903 | * of the kvm facility mask by all missing facilities. This will allow | ||
904 | * to determine the right CPU model by means of the remaining facilities. | ||
905 | * Live guest migration must prohibit the migration of KVMs running in | ||
906 | * a LPAR to non LPAR hosts. | ||
907 | */ | ||
908 | if (!MACHINE_IS_LPAR) | ||
909 | for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) | ||
910 | kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; | ||
911 | |||
912 | /* | ||
913 | * Apply the kvm facility mask to limit the kvm supported/tolerated | ||
914 | * facility list. | ||
915 | */ | ||
916 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { | ||
917 | if (i < kvm_s390_fac_list_mask_size()) | ||
918 | kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; | ||
919 | else | ||
920 | kvm->arch.model.fac->kvm[i] = 0UL; | ||
921 | } | ||
922 | |||
923 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); | ||
924 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; | ||
925 | |||
457 | if (kvm_s390_crypto_init(kvm) < 0) | 926 | if (kvm_s390_crypto_init(kvm) < 0) |
458 | goto out_crypto; | 927 | goto out_crypto; |
459 | 928 | ||
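Editor's note: the per-VM facility list built above (arch.model.fac->kvm, masked by kvm_s390_fac_list_mask) is what the new test_kvm_facility() checks wherever this patch replaces test_vfacility(). The helper itself is not part of this file; a plausible minimal form, mirroring the removed test_vfacility() at the top of the diff, would be:

/* Assumed sketch of the helper (the real one lives in kvm-s390.h, outside
 * this diff): test a facility bit against the per-VM facility list. */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, (void *) kvm->arch.model.fac->kvm);
}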
@@ -477,6 +946,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
477 | 946 | ||
478 | kvm->arch.css_support = 0; | 947 | kvm->arch.css_support = 0; |
479 | kvm->arch.use_irqchip = 0; | 948 | kvm->arch.use_irqchip = 0; |
949 | kvm->arch.epoch = 0; | ||
480 | 950 | ||
481 | spin_lock_init(&kvm->arch.start_stop_lock); | 951 | spin_lock_init(&kvm->arch.start_stop_lock); |
482 | 952 | ||
@@ -484,6 +954,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
484 | out_nogmap: | 954 | out_nogmap: |
485 | kfree(kvm->arch.crypto.crycb); | 955 | kfree(kvm->arch.crypto.crycb); |
486 | out_crypto: | 956 | out_crypto: |
957 | free_page((unsigned long)kvm->arch.model.fac); | ||
958 | out_nofac: | ||
487 | debug_unregister(kvm->arch.dbf); | 959 | debug_unregister(kvm->arch.dbf); |
488 | out_nodbf: | 960 | out_nodbf: |
489 | free_page((unsigned long)(kvm->arch.sca)); | 961 | free_page((unsigned long)(kvm->arch.sca)); |
@@ -536,6 +1008,7 @@ static void kvm_free_vcpus(struct kvm *kvm) | |||
536 | void kvm_arch_destroy_vm(struct kvm *kvm) | 1008 | void kvm_arch_destroy_vm(struct kvm *kvm) |
537 | { | 1009 | { |
538 | kvm_free_vcpus(kvm); | 1010 | kvm_free_vcpus(kvm); |
1011 | free_page((unsigned long)kvm->arch.model.fac); | ||
539 | free_page((unsigned long)(kvm->arch.sca)); | 1012 | free_page((unsigned long)(kvm->arch.sca)); |
540 | debug_unregister(kvm->arch.dbf); | 1013 | debug_unregister(kvm->arch.dbf); |
541 | kfree(kvm->arch.crypto.crycb); | 1014 | kfree(kvm->arch.crypto.crycb); |
@@ -546,25 +1019,30 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
546 | } | 1019 | } |
547 | 1020 | ||
548 | /* Section: vcpu related */ | 1021 | /* Section: vcpu related */ |
1022 | static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) | ||
1023 | { | ||
1024 | vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); | ||
1025 | if (!vcpu->arch.gmap) | ||
1026 | return -ENOMEM; | ||
1027 | vcpu->arch.gmap->private = vcpu->kvm; | ||
1028 | |||
1029 | return 0; | ||
1030 | } | ||
1031 | |||
549 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 1032 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
550 | { | 1033 | { |
551 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; | 1034 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; |
552 | kvm_clear_async_pf_completion_queue(vcpu); | 1035 | kvm_clear_async_pf_completion_queue(vcpu); |
553 | if (kvm_is_ucontrol(vcpu->kvm)) { | ||
554 | vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); | ||
555 | if (!vcpu->arch.gmap) | ||
556 | return -ENOMEM; | ||
557 | vcpu->arch.gmap->private = vcpu->kvm; | ||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | vcpu->arch.gmap = vcpu->kvm->arch.gmap; | ||
562 | vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | | 1036 | vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | |
563 | KVM_SYNC_GPRS | | 1037 | KVM_SYNC_GPRS | |
564 | KVM_SYNC_ACRS | | 1038 | KVM_SYNC_ACRS | |
565 | KVM_SYNC_CRS | | 1039 | KVM_SYNC_CRS | |
566 | KVM_SYNC_ARCH0 | | 1040 | KVM_SYNC_ARCH0 | |
567 | KVM_SYNC_PFAULT; | 1041 | KVM_SYNC_PFAULT; |
1042 | |||
1043 | if (kvm_is_ucontrol(vcpu->kvm)) | ||
1044 | return __kvm_ucontrol_vcpu_init(vcpu); | ||
1045 | |||
568 | return 0; | 1046 | return 0; |
569 | } | 1047 | } |
570 | 1048 | ||
@@ -615,16 +1093,27 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) | |||
615 | kvm_s390_clear_local_irqs(vcpu); | 1093 | kvm_s390_clear_local_irqs(vcpu); |
616 | } | 1094 | } |
617 | 1095 | ||
618 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | 1096 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
619 | { | 1097 | { |
620 | return 0; | 1098 | mutex_lock(&vcpu->kvm->lock); |
1099 | vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; | ||
1100 | mutex_unlock(&vcpu->kvm->lock); | ||
1101 | if (!kvm_is_ucontrol(vcpu->kvm)) | ||
1102 | vcpu->arch.gmap = vcpu->kvm->arch.gmap; | ||
621 | } | 1103 | } |
622 | 1104 | ||
623 | static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) | 1105 | static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) |
624 | { | 1106 | { |
625 | if (!test_vfacility(76)) | 1107 | if (!test_kvm_facility(vcpu->kvm, 76)) |
626 | return; | 1108 | return; |
627 | 1109 | ||
1110 | vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); | ||
1111 | |||
1112 | if (vcpu->kvm->arch.crypto.aes_kw) | ||
1113 | vcpu->arch.sie_block->ecb3 |= ECB3_AES; | ||
1114 | if (vcpu->kvm->arch.crypto.dea_kw) | ||
1115 | vcpu->arch.sie_block->ecb3 |= ECB3_DEA; | ||
1116 | |||
628 | vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; | 1117 | vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; |
629 | } | 1118 | } |
630 | 1119 | ||
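Editor's note: with kvm_s390_vcpu_crypto_setup() now deriving the ECB3_AES/ECB3_DEA bits from the VM-wide aes_kw/dea_kw flags, the KVM_S390_VM_CRYPTO attributes handled earlier in this patch can flip protected-key wrapping at runtime: enabling generates fresh random wrapping keys, and exit_sie() forces every VCPU to pick up the new control-block bits. A sketch of the userspace side, assuming the constants from this series' uapi header; set_aes_key_wrapping() is a made-up helper.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_aes_key_wrapping(int vm_fd, int enable)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CRYPTO,
		.attr  = enable ? KVM_S390_VM_CRYPTO_ENABLE_AES_KW
				: KVM_S390_VM_CRYPTO_DISABLE_AES_KW,
		/* no payload: the attribute selector alone carries the request */
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}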
@@ -654,14 +1143,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
654 | CPUSTAT_STOPPED | | 1143 | CPUSTAT_STOPPED | |
655 | CPUSTAT_GED); | 1144 | CPUSTAT_GED); |
656 | vcpu->arch.sie_block->ecb = 6; | 1145 | vcpu->arch.sie_block->ecb = 6; |
657 | if (test_vfacility(50) && test_vfacility(73)) | 1146 | if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) |
658 | vcpu->arch.sie_block->ecb |= 0x10; | 1147 | vcpu->arch.sie_block->ecb |= 0x10; |
659 | 1148 | ||
660 | vcpu->arch.sie_block->ecb2 = 8; | 1149 | vcpu->arch.sie_block->ecb2 = 8; |
661 | vcpu->arch.sie_block->eca = 0xD1002000U; | 1150 | vcpu->arch.sie_block->eca = 0xC1002000U; |
662 | if (sclp_has_siif()) | 1151 | if (sclp_has_siif()) |
663 | vcpu->arch.sie_block->eca |= 1; | 1152 | vcpu->arch.sie_block->eca |= 1; |
664 | vcpu->arch.sie_block->fac = (int) (long) vfacilities; | 1153 | if (sclp_has_sigpif()) |
1154 | vcpu->arch.sie_block->eca |= 0x10000000U; | ||
665 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | | 1155 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | |
666 | ICTL_TPROT; | 1156 | ICTL_TPROT; |
667 | 1157 | ||
@@ -670,10 +1160,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
670 | if (rc) | 1160 | if (rc) |
671 | return rc; | 1161 | return rc; |
672 | } | 1162 | } |
673 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 1163 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
674 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; | 1164 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; |
675 | get_cpu_id(&vcpu->arch.cpu_id); | 1165 | |
676 | vcpu->arch.cpu_id.version = 0xff; | 1166 | mutex_lock(&vcpu->kvm->lock); |
1167 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; | ||
1168 | memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, | ||
1169 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
1170 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; | ||
1171 | mutex_unlock(&vcpu->kvm->lock); | ||
677 | 1172 | ||
678 | kvm_s390_vcpu_crypto_setup(vcpu); | 1173 | kvm_s390_vcpu_crypto_setup(vcpu); |
679 | 1174 | ||
@@ -717,6 +1212,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
717 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; | 1212 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; |
718 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); | 1213 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); |
719 | } | 1214 | } |
1215 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; | ||
720 | 1216 | ||
721 | spin_lock_init(&vcpu->arch.local_int.lock); | 1217 | spin_lock_init(&vcpu->arch.local_int.lock); |
722 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | 1218 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; |
@@ -741,7 +1237,7 @@ out: | |||
741 | 1237 | ||
742 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 1238 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
743 | { | 1239 | { |
744 | return kvm_cpu_has_interrupt(vcpu); | 1240 | return kvm_s390_vcpu_has_irq(vcpu, 0); |
745 | } | 1241 | } |
746 | 1242 | ||
747 | void s390_vcpu_block(struct kvm_vcpu *vcpu) | 1243 | void s390_vcpu_block(struct kvm_vcpu *vcpu) |
@@ -869,6 +1365,8 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, | |||
869 | case KVM_REG_S390_PFTOKEN: | 1365 | case KVM_REG_S390_PFTOKEN: |
870 | r = get_user(vcpu->arch.pfault_token, | 1366 | r = get_user(vcpu->arch.pfault_token, |
871 | (u64 __user *)reg->addr); | 1367 | (u64 __user *)reg->addr); |
1368 | if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) | ||
1369 | kvm_clear_async_pf_completion_queue(vcpu); | ||
872 | break; | 1370 | break; |
873 | case KVM_REG_S390_PFCOMPARE: | 1371 | case KVM_REG_S390_PFCOMPARE: |
874 | r = get_user(vcpu->arch.pfault_compare, | 1372 | r = get_user(vcpu->arch.pfault_compare, |
@@ -1176,7 +1674,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) | |||
1176 | return 0; | 1674 | return 0; |
1177 | if (psw_extint_disabled(vcpu)) | 1675 | if (psw_extint_disabled(vcpu)) |
1178 | return 0; | 1676 | return 0; |
1179 | if (kvm_cpu_has_interrupt(vcpu)) | 1677 | if (kvm_s390_vcpu_has_irq(vcpu, 0)) |
1180 | return 0; | 1678 | return 0; |
1181 | if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) | 1679 | if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) |
1182 | return 0; | 1680 | return 0; |
@@ -1341,6 +1839,8 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1341 | vcpu->arch.pfault_token = kvm_run->s.regs.pft; | 1839 | vcpu->arch.pfault_token = kvm_run->s.regs.pft; |
1342 | vcpu->arch.pfault_select = kvm_run->s.regs.pfs; | 1840 | vcpu->arch.pfault_select = kvm_run->s.regs.pfs; |
1343 | vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; | 1841 | vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; |
1842 | if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) | ||
1843 | kvm_clear_async_pf_completion_queue(vcpu); | ||
1344 | } | 1844 | } |
1345 | kvm_run->kvm_dirty_regs = 0; | 1845 | kvm_run->kvm_dirty_regs = 0; |
1346 | } | 1846 | } |
@@ -1559,15 +2059,10 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) | |||
1559 | spin_lock(&vcpu->kvm->arch.start_stop_lock); | 2059 | spin_lock(&vcpu->kvm->arch.start_stop_lock); |
1560 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); | 2060 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); |
1561 | 2061 | ||
1562 | /* Need to lock access to action_bits to avoid a SIGP race condition */ | ||
1563 | spin_lock(&vcpu->arch.local_int.lock); | ||
1564 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | ||
1565 | |||
1566 | /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ | 2062 | /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ |
1567 | vcpu->arch.local_int.action_bits &= | 2063 | kvm_s390_clear_stop_irq(vcpu); |
1568 | ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP); | ||
1569 | spin_unlock(&vcpu->arch.local_int.lock); | ||
1570 | 2064 | ||
2065 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | ||
1571 | __disable_ibs_on_vcpu(vcpu); | 2066 | __disable_ibs_on_vcpu(vcpu); |
1572 | 2067 | ||
1573 | for (i = 0; i < online_vcpus; i++) { | 2068 | for (i = 0; i < online_vcpus; i++) { |
@@ -1783,30 +2278,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
1783 | 2278 | ||
1784 | static int __init kvm_s390_init(void) | 2279 | static int __init kvm_s390_init(void) |
1785 | { | 2280 | { |
1786 | int ret; | 2281 | return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); |
1787 | ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | ||
1788 | if (ret) | ||
1789 | return ret; | ||
1790 | |||
1791 | /* | ||
1792 | * guests can ask for up to 255+1 double words, we need a full page | ||
1793 | * to hold the maximum amount of facilities. On the other hand, we | ||
1794 | * only set facilities that are known to work in KVM. | ||
1795 | */ | ||
1796 | vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); | ||
1797 | if (!vfacilities) { | ||
1798 | kvm_exit(); | ||
1799 | return -ENOMEM; | ||
1800 | } | ||
1801 | memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); | ||
1802 | vfacilities[0] &= 0xff82fffbf47c2000UL; | ||
1803 | vfacilities[1] &= 0x005c000000000000UL; | ||
1804 | return 0; | ||
1805 | } | 2282 | } |
1806 | 2283 | ||
1807 | static void __exit kvm_s390_exit(void) | 2284 | static void __exit kvm_s390_exit(void) |
1808 | { | 2285 | { |
1809 | free_page((unsigned long) vfacilities); | ||
1810 | kvm_exit(); | 2286 | kvm_exit(); |
1811 | } | 2287 | } |
1812 | 2288 | ||