diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-09 14:42:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-09 14:42:31 -0400 |
commit | d8312a3f61024352f1c7cb967571fd53631b0d6c (patch) | |
tree | be2f2f699e763330b0f0179e9f86009affbc0c7d /arch/s390 | |
parent | e9092d0d97961146655ce51f43850907d95f68c3 (diff) | |
parent | e01bca2fc698d7f0626f0214001af523e18ad60b (diff) |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"ARM:
- VHE optimizations
- EL2 address space randomization
- speculative execution mitigations ("variant 3a", aka execution past
invalid privilege register access)
- bugfixes and cleanups
PPC:
- improvements for the radix page fault handler for HV KVM on POWER9
s390:
- more kvm stat counters
- virtio gpu plumbing
- documentation
- facilities improvements
x86:
- support for VMware magic I/O port and pseudo-PMCs
- AMD pause loop exiting
- support for AMD core performance extensions
- support for synchronous register access
- expose nVMX capabilities to userspace
- support for Hyper-V signaling via eventfd
- use Enlightened VMCS when running on Hyper-V
- allow userspace to disable MWAIT/HLT/PAUSE vmexits
- usual roundup of optimizations and nested virtualization bugfixes
Generic:
- API selftest infrastructure (though the only tests are for x86 as
of now)"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (174 commits)
kvm: x86: fix a prototype warning
kvm: selftests: add sync_regs_test
kvm: selftests: add API testing infrastructure
kvm: x86: fix a compile warning
KVM: X86: Add Force Emulation Prefix for "emulate the next instruction"
KVM: X86: Introduce handle_ud()
KVM: vmx: unify adjacent #ifdefs
x86: kvm: hide the unused 'cpu' variable
KVM: VMX: remove bogus WARN_ON in handle_ept_misconfig
Revert "KVM: X86: Fix SMRAM accessing even if VM is shutdown"
kvm: Add emulation for movups/movupd
KVM: VMX: raise internal error for exception during invalid protected mode state
KVM: nVMX: Optimization: Don't set KVM_REQ_EVENT when VMExit with nested_run_pending
KVM: nVMX: Require immediate-exit when event reinjected to L2 and L1 event pending
KVM: x86: Fix misleading comments on handling pending exceptions
KVM: x86: Rename interrupt.pending to interrupt.injected
KVM: VMX: No need to clear pending NMI/interrupt on inject realmode interrupt
x86/kvm: use Enlightened VMCS when running on Hyper-V
x86/hyper-v: detect nested features
x86/hyper-v: define struct hv_enlightened_vmcs and clean field bits
...
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/include/asm/kvm_host.h | 28 | ||||
-rw-r--r-- | arch/s390/include/asm/kvm_para.h | 5 | ||||
-rw-r--r-- | arch/s390/include/asm/mmu.h | 4 | ||||
-rw-r--r-- | arch/s390/include/asm/mmu_context.h | 2 | ||||
-rw-r--r-- | arch/s390/kvm/gaccess.c | 9 | ||||
-rw-r--r-- | arch/s390/kvm/intercept.c | 17 | ||||
-rw-r--r-- | arch/s390/kvm/interrupt.c | 26 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 102 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.h | 2 | ||||
-rw-r--r-- | arch/s390/kvm/priv.c | 4 | ||||
-rw-r--r-- | arch/s390/tools/gen_facilities.c | 20 |
11 files changed, 152 insertions, 67 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index afb0f08b8021..81cdb6b55118 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -294,6 +294,7 @@ struct kvm_vcpu_stat { | |||
294 | u64 exit_userspace; | 294 | u64 exit_userspace; |
295 | u64 exit_null; | 295 | u64 exit_null; |
296 | u64 exit_external_request; | 296 | u64 exit_external_request; |
297 | u64 exit_io_request; | ||
297 | u64 exit_external_interrupt; | 298 | u64 exit_external_interrupt; |
298 | u64 exit_stop_request; | 299 | u64 exit_stop_request; |
299 | u64 exit_validity; | 300 | u64 exit_validity; |
@@ -310,16 +311,29 @@ struct kvm_vcpu_stat { | |||
310 | u64 exit_program_interruption; | 311 | u64 exit_program_interruption; |
311 | u64 exit_instr_and_program; | 312 | u64 exit_instr_and_program; |
312 | u64 exit_operation_exception; | 313 | u64 exit_operation_exception; |
314 | u64 deliver_ckc; | ||
315 | u64 deliver_cputm; | ||
313 | u64 deliver_external_call; | 316 | u64 deliver_external_call; |
314 | u64 deliver_emergency_signal; | 317 | u64 deliver_emergency_signal; |
315 | u64 deliver_service_signal; | 318 | u64 deliver_service_signal; |
316 | u64 deliver_virtio_interrupt; | 319 | u64 deliver_virtio; |
317 | u64 deliver_stop_signal; | 320 | u64 deliver_stop_signal; |
318 | u64 deliver_prefix_signal; | 321 | u64 deliver_prefix_signal; |
319 | u64 deliver_restart_signal; | 322 | u64 deliver_restart_signal; |
320 | u64 deliver_program_int; | 323 | u64 deliver_program; |
321 | u64 deliver_io_int; | 324 | u64 deliver_io; |
325 | u64 deliver_machine_check; | ||
322 | u64 exit_wait_state; | 326 | u64 exit_wait_state; |
327 | u64 inject_ckc; | ||
328 | u64 inject_cputm; | ||
329 | u64 inject_external_call; | ||
330 | u64 inject_emergency_signal; | ||
331 | u64 inject_mchk; | ||
332 | u64 inject_pfault_init; | ||
333 | u64 inject_program; | ||
334 | u64 inject_restart; | ||
335 | u64 inject_set_prefix; | ||
336 | u64 inject_stop_signal; | ||
323 | u64 instruction_epsw; | 337 | u64 instruction_epsw; |
324 | u64 instruction_gs; | 338 | u64 instruction_gs; |
325 | u64 instruction_io_other; | 339 | u64 instruction_io_other; |
@@ -644,7 +658,12 @@ struct kvm_vcpu_arch { | |||
644 | }; | 658 | }; |
645 | 659 | ||
646 | struct kvm_vm_stat { | 660 | struct kvm_vm_stat { |
647 | ulong remote_tlb_flush; | 661 | u64 inject_io; |
662 | u64 inject_float_mchk; | ||
663 | u64 inject_pfault_done; | ||
664 | u64 inject_service_signal; | ||
665 | u64 inject_virtio; | ||
666 | u64 remote_tlb_flush; | ||
648 | }; | 667 | }; |
649 | 668 | ||
650 | struct kvm_arch_memory_slot { | 669 | struct kvm_arch_memory_slot { |
@@ -792,6 +811,7 @@ struct kvm_arch{ | |||
792 | int css_support; | 811 | int css_support; |
793 | int use_irqchip; | 812 | int use_irqchip; |
794 | int use_cmma; | 813 | int use_cmma; |
814 | int use_pfmfi; | ||
795 | int user_cpu_state_ctrl; | 815 | int user_cpu_state_ctrl; |
796 | int user_sigp; | 816 | int user_sigp; |
797 | int user_stsi; | 817 | int user_stsi; |
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h index 74eeec9c0a80..cbc7c3a68e4d 100644 --- a/arch/s390/include/asm/kvm_para.h +++ b/arch/s390/include/asm/kvm_para.h | |||
@@ -193,6 +193,11 @@ static inline unsigned int kvm_arch_para_features(void) | |||
193 | return 0; | 193 | return 0; |
194 | } | 194 | } |
195 | 195 | ||
196 | static inline unsigned int kvm_arch_para_hints(void) | ||
197 | { | ||
198 | return 0; | ||
199 | } | ||
200 | |||
196 | static inline bool kvm_check_and_clear_guest_paused(void) | 201 | static inline bool kvm_check_and_clear_guest_paused(void) |
197 | { | 202 | { |
198 | return false; | 203 | return false; |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index db35c41a59d5..c639c95850e4 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
@@ -22,8 +22,8 @@ typedef struct { | |||
22 | unsigned int has_pgste:1; | 22 | unsigned int has_pgste:1; |
23 | /* The mmu context uses storage keys. */ | 23 | /* The mmu context uses storage keys. */ |
24 | unsigned int use_skey:1; | 24 | unsigned int use_skey:1; |
25 | /* The mmu context uses CMMA. */ | 25 | /* The mmu context uses CMM. */ |
26 | unsigned int use_cmma:1; | 26 | unsigned int uses_cmm:1; |
27 | } mm_context_t; | 27 | } mm_context_t; |
28 | 28 | ||
29 | #define INIT_MM_CONTEXT(name) \ | 29 | #define INIT_MM_CONTEXT(name) \ |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 6c8ce15cde7b..324f6f452982 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -31,7 +31,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
31 | (current->mm && current->mm->context.alloc_pgste); | 31 | (current->mm && current->mm->context.alloc_pgste); |
32 | mm->context.has_pgste = 0; | 32 | mm->context.has_pgste = 0; |
33 | mm->context.use_skey = 0; | 33 | mm->context.use_skey = 0; |
34 | mm->context.use_cmma = 0; | 34 | mm->context.uses_cmm = 0; |
35 | #endif | 35 | #endif |
36 | switch (mm->context.asce_limit) { | 36 | switch (mm->context.asce_limit) { |
37 | case _REGION2_SIZE: | 37 | case _REGION2_SIZE: |
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index c24bfa72baf7..8e2b8647ee12 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c | |||
@@ -1050,8 +1050,7 @@ shadow_r2t: | |||
1050 | rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake); | 1050 | rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake); |
1051 | if (rc) | 1051 | if (rc) |
1052 | return rc; | 1052 | return rc; |
1053 | /* fallthrough */ | 1053 | } /* fallthrough */ |
1054 | } | ||
1055 | case ASCE_TYPE_REGION2: { | 1054 | case ASCE_TYPE_REGION2: { |
1056 | union region2_table_entry rste; | 1055 | union region2_table_entry rste; |
1057 | 1056 | ||
@@ -1077,8 +1076,7 @@ shadow_r3t: | |||
1077 | rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake); | 1076 | rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake); |
1078 | if (rc) | 1077 | if (rc) |
1079 | return rc; | 1078 | return rc; |
1080 | /* fallthrough */ | 1079 | } /* fallthrough */ |
1081 | } | ||
1082 | case ASCE_TYPE_REGION3: { | 1080 | case ASCE_TYPE_REGION3: { |
1083 | union region3_table_entry rtte; | 1081 | union region3_table_entry rtte; |
1084 | 1082 | ||
@@ -1113,8 +1111,7 @@ shadow_sgt: | |||
1113 | rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake); | 1111 | rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake); |
1114 | if (rc) | 1112 | if (rc) |
1115 | return rc; | 1113 | return rc; |
1116 | /* fallthrough */ | 1114 | } /* fallthrough */ |
1117 | } | ||
1118 | case ASCE_TYPE_SEGMENT: { | 1115 | case ASCE_TYPE_SEGMENT: { |
1119 | union segment_table_entry ste; | 1116 | union segment_table_entry ste; |
1120 | 1117 | ||
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 07c6e81163bf..a389fa85cca2 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -50,18 +50,6 @@ u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu) | |||
50 | return ilen; | 50 | return ilen; |
51 | } | 51 | } |
52 | 52 | ||
53 | static int handle_noop(struct kvm_vcpu *vcpu) | ||
54 | { | ||
55 | switch (vcpu->arch.sie_block->icptcode) { | ||
56 | case 0x10: | ||
57 | vcpu->stat.exit_external_request++; | ||
58 | break; | ||
59 | default: | ||
60 | break; /* nothing */ | ||
61 | } | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int handle_stop(struct kvm_vcpu *vcpu) | 53 | static int handle_stop(struct kvm_vcpu *vcpu) |
66 | { | 54 | { |
67 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 55 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
@@ -465,8 +453,11 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) | |||
465 | 453 | ||
466 | switch (vcpu->arch.sie_block->icptcode) { | 454 | switch (vcpu->arch.sie_block->icptcode) { |
467 | case ICPT_EXTREQ: | 455 | case ICPT_EXTREQ: |
456 | vcpu->stat.exit_external_request++; | ||
457 | return 0; | ||
468 | case ICPT_IOREQ: | 458 | case ICPT_IOREQ: |
469 | return handle_noop(vcpu); | 459 | vcpu->stat.exit_io_request++; |
460 | return 0; | ||
470 | case ICPT_INST: | 461 | case ICPT_INST: |
471 | rc = handle_instruction(vcpu); | 462 | rc = handle_instruction(vcpu); |
472 | break; | 463 | break; |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index b04616b57a94..37d06e022238 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -391,6 +391,7 @@ static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) | |||
391 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 391 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
392 | int rc; | 392 | int rc; |
393 | 393 | ||
394 | vcpu->stat.deliver_cputm++; | ||
394 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, | 395 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, |
395 | 0, 0); | 396 | 0, 0); |
396 | 397 | ||
@@ -410,6 +411,7 @@ static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) | |||
410 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 411 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
411 | int rc; | 412 | int rc; |
412 | 413 | ||
414 | vcpu->stat.deliver_ckc++; | ||
413 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, | 415 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, |
414 | 0, 0); | 416 | 0, 0); |
415 | 417 | ||
@@ -595,6 +597,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) | |||
595 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 597 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
596 | KVM_S390_MCHK, | 598 | KVM_S390_MCHK, |
597 | mchk.cr14, mchk.mcic); | 599 | mchk.cr14, mchk.mcic); |
600 | vcpu->stat.deliver_machine_check++; | ||
598 | rc = __write_machine_check(vcpu, &mchk); | 601 | rc = __write_machine_check(vcpu, &mchk); |
599 | } | 602 | } |
600 | return rc; | 603 | return rc; |
@@ -710,7 +713,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
710 | ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK; | 713 | ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK; |
711 | VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d", | 714 | VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d", |
712 | pgm_info.code, ilen); | 715 | pgm_info.code, ilen); |
713 | vcpu->stat.deliver_program_int++; | 716 | vcpu->stat.deliver_program++; |
714 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, | 717 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, |
715 | pgm_info.code, 0); | 718 | pgm_info.code, 0); |
716 | 719 | ||
@@ -899,7 +902,7 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) | |||
899 | VCPU_EVENT(vcpu, 4, | 902 | VCPU_EVENT(vcpu, 4, |
900 | "deliver: virtio parm: 0x%x,parm64: 0x%llx", | 903 | "deliver: virtio parm: 0x%x,parm64: 0x%llx", |
901 | inti->ext.ext_params, inti->ext.ext_params2); | 904 | inti->ext.ext_params, inti->ext.ext_params2); |
902 | vcpu->stat.deliver_virtio_interrupt++; | 905 | vcpu->stat.deliver_virtio++; |
903 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 906 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
904 | inti->type, | 907 | inti->type, |
905 | inti->ext.ext_params, | 908 | inti->ext.ext_params, |
@@ -975,7 +978,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu, | |||
975 | inti->io.subchannel_id >> 1 & 0x3, | 978 | inti->io.subchannel_id >> 1 & 0x3, |
976 | inti->io.subchannel_nr); | 979 | inti->io.subchannel_nr); |
977 | 980 | ||
978 | vcpu->stat.deliver_io_int++; | 981 | vcpu->stat.deliver_io++; |
979 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 982 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
980 | inti->type, | 983 | inti->type, |
981 | ((__u32)inti->io.subchannel_id << 16) | | 984 | ((__u32)inti->io.subchannel_id << 16) | |
@@ -1004,7 +1007,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu, | |||
1004 | VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc); | 1007 | VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc); |
1005 | memset(&io, 0, sizeof(io)); | 1008 | memset(&io, 0, sizeof(io)); |
1006 | io.io_int_word = isc_to_int_word(isc); | 1009 | io.io_int_word = isc_to_int_word(isc); |
1007 | vcpu->stat.deliver_io_int++; | 1010 | vcpu->stat.deliver_io++; |
1008 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 1011 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
1009 | KVM_S390_INT_IO(1, 0, 0, 0), | 1012 | KVM_S390_INT_IO(1, 0, 0, 0), |
1010 | ((__u32)io.subchannel_id << 16) | | 1013 | ((__u32)io.subchannel_id << 16) | |
@@ -1268,6 +1271,7 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1268 | { | 1271 | { |
1269 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1272 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1270 | 1273 | ||
1274 | vcpu->stat.inject_program++; | ||
1271 | VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code); | 1275 | VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code); |
1272 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, | 1276 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, |
1273 | irq->u.pgm.code, 0); | 1277 | irq->u.pgm.code, 0); |
@@ -1309,6 +1313,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1309 | { | 1313 | { |
1310 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1314 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1311 | 1315 | ||
1316 | vcpu->stat.inject_pfault_init++; | ||
1312 | VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx", | 1317 | VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx", |
1313 | irq->u.ext.ext_params2); | 1318 | irq->u.ext.ext_params2); |
1314 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, | 1319 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, |
@@ -1327,6 +1332,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1327 | struct kvm_s390_extcall_info *extcall = &li->irq.extcall; | 1332 | struct kvm_s390_extcall_info *extcall = &li->irq.extcall; |
1328 | uint16_t src_id = irq->u.extcall.code; | 1333 | uint16_t src_id = irq->u.extcall.code; |
1329 | 1334 | ||
1335 | vcpu->stat.inject_external_call++; | ||
1330 | VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u", | 1336 | VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u", |
1331 | src_id); | 1337 | src_id); |
1332 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, | 1338 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, |
@@ -1351,6 +1357,7 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1351 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1357 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1352 | struct kvm_s390_prefix_info *prefix = &li->irq.prefix; | 1358 | struct kvm_s390_prefix_info *prefix = &li->irq.prefix; |
1353 | 1359 | ||
1360 | vcpu->stat.inject_set_prefix++; | ||
1354 | VCPU_EVENT(vcpu, 3, "inject: set prefix to %x", | 1361 | VCPU_EVENT(vcpu, 3, "inject: set prefix to %x", |
1355 | irq->u.prefix.address); | 1362 | irq->u.prefix.address); |
1356 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, | 1363 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, |
@@ -1371,6 +1378,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1371 | struct kvm_s390_stop_info *stop = &li->irq.stop; | 1378 | struct kvm_s390_stop_info *stop = &li->irq.stop; |
1372 | int rc = 0; | 1379 | int rc = 0; |
1373 | 1380 | ||
1381 | vcpu->stat.inject_stop_signal++; | ||
1374 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0); | 1382 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0); |
1375 | 1383 | ||
1376 | if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) | 1384 | if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) |
@@ -1395,6 +1403,7 @@ static int __inject_sigp_restart(struct kvm_vcpu *vcpu, | |||
1395 | { | 1403 | { |
1396 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1404 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1397 | 1405 | ||
1406 | vcpu->stat.inject_restart++; | ||
1398 | VCPU_EVENT(vcpu, 3, "%s", "inject: restart int"); | 1407 | VCPU_EVENT(vcpu, 3, "%s", "inject: restart int"); |
1399 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); | 1408 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); |
1400 | 1409 | ||
@@ -1407,6 +1416,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, | |||
1407 | { | 1416 | { |
1408 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1417 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1409 | 1418 | ||
1419 | vcpu->stat.inject_emergency_signal++; | ||
1410 | VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u", | 1420 | VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u", |
1411 | irq->u.emerg.code); | 1421 | irq->u.emerg.code); |
1412 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, | 1422 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, |
@@ -1427,6 +1437,7 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1427 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1437 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1428 | struct kvm_s390_mchk_info *mchk = &li->irq.mchk; | 1438 | struct kvm_s390_mchk_info *mchk = &li->irq.mchk; |
1429 | 1439 | ||
1440 | vcpu->stat.inject_mchk++; | ||
1430 | VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx", | 1441 | VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx", |
1431 | irq->u.mchk.mcic); | 1442 | irq->u.mchk.mcic); |
1432 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, | 1443 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, |
@@ -1457,6 +1468,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu) | |||
1457 | { | 1468 | { |
1458 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1469 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1459 | 1470 | ||
1471 | vcpu->stat.inject_ckc++; | ||
1460 | VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external"); | 1472 | VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external"); |
1461 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, | 1473 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, |
1462 | 0, 0); | 1474 | 0, 0); |
@@ -1470,6 +1482,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) | |||
1470 | { | 1482 | { |
1471 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1483 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1472 | 1484 | ||
1485 | vcpu->stat.inject_cputm++; | ||
1473 | VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external"); | 1486 | VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external"); |
1474 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, | 1487 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, |
1475 | 0, 0); | 1488 | 0, 0); |
@@ -1596,6 +1609,7 @@ static int __inject_service(struct kvm *kvm, | |||
1596 | { | 1609 | { |
1597 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | 1610 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; |
1598 | 1611 | ||
1612 | kvm->stat.inject_service_signal++; | ||
1599 | spin_lock(&fi->lock); | 1613 | spin_lock(&fi->lock); |
1600 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; | 1614 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; |
1601 | /* | 1615 | /* |
@@ -1621,6 +1635,7 @@ static int __inject_virtio(struct kvm *kvm, | |||
1621 | { | 1635 | { |
1622 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | 1636 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; |
1623 | 1637 | ||
1638 | kvm->stat.inject_virtio++; | ||
1624 | spin_lock(&fi->lock); | 1639 | spin_lock(&fi->lock); |
1625 | if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { | 1640 | if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { |
1626 | spin_unlock(&fi->lock); | 1641 | spin_unlock(&fi->lock); |
@@ -1638,6 +1653,7 @@ static int __inject_pfault_done(struct kvm *kvm, | |||
1638 | { | 1653 | { |
1639 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | 1654 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; |
1640 | 1655 | ||
1656 | kvm->stat.inject_pfault_done++; | ||
1641 | spin_lock(&fi->lock); | 1657 | spin_lock(&fi->lock); |
1642 | if (fi->counters[FIRQ_CNTR_PFAULT] >= | 1658 | if (fi->counters[FIRQ_CNTR_PFAULT] >= |
1643 | (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { | 1659 | (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { |
@@ -1657,6 +1673,7 @@ static int __inject_float_mchk(struct kvm *kvm, | |||
1657 | { | 1673 | { |
1658 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | 1674 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; |
1659 | 1675 | ||
1676 | kvm->stat.inject_float_mchk++; | ||
1660 | spin_lock(&fi->lock); | 1677 | spin_lock(&fi->lock); |
1661 | fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); | 1678 | fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); |
1662 | fi->mchk.mcic |= inti->mchk.mcic; | 1679 | fi->mchk.mcic |= inti->mchk.mcic; |
@@ -1672,6 +1689,7 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
1672 | struct list_head *list; | 1689 | struct list_head *list; |
1673 | int isc; | 1690 | int isc; |
1674 | 1691 | ||
1692 | kvm->stat.inject_io++; | ||
1675 | isc = int_word_to_isc(inti->io.io_int_word); | 1693 | isc = int_word_to_isc(inti->io.io_int_word); |
1676 | 1694 | ||
1677 | if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) { | 1695 | if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) { |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 339ac0964590..64c986243018 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -57,6 +57,7 @@ | |||
57 | (KVM_MAX_VCPUS + LOCAL_IRQS)) | 57 | (KVM_MAX_VCPUS + LOCAL_IRQS)) |
58 | 58 | ||
59 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 59 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
60 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM | ||
60 | 61 | ||
61 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 62 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
62 | { "userspace_handled", VCPU_STAT(exit_userspace) }, | 63 | { "userspace_handled", VCPU_STAT(exit_userspace) }, |
@@ -64,6 +65,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
64 | { "exit_validity", VCPU_STAT(exit_validity) }, | 65 | { "exit_validity", VCPU_STAT(exit_validity) }, |
65 | { "exit_stop_request", VCPU_STAT(exit_stop_request) }, | 66 | { "exit_stop_request", VCPU_STAT(exit_stop_request) }, |
66 | { "exit_external_request", VCPU_STAT(exit_external_request) }, | 67 | { "exit_external_request", VCPU_STAT(exit_external_request) }, |
68 | { "exit_io_request", VCPU_STAT(exit_io_request) }, | ||
67 | { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, | 69 | { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, |
68 | { "exit_instruction", VCPU_STAT(exit_instruction) }, | 70 | { "exit_instruction", VCPU_STAT(exit_instruction) }, |
69 | { "exit_pei", VCPU_STAT(exit_pei) }, | 71 | { "exit_pei", VCPU_STAT(exit_pei) }, |
@@ -78,16 +80,34 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
78 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, | 80 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, |
79 | { "instruction_stctl", VCPU_STAT(instruction_stctl) }, | 81 | { "instruction_stctl", VCPU_STAT(instruction_stctl) }, |
80 | { "instruction_stctg", VCPU_STAT(instruction_stctg) }, | 82 | { "instruction_stctg", VCPU_STAT(instruction_stctg) }, |
83 | { "deliver_ckc", VCPU_STAT(deliver_ckc) }, | ||
84 | { "deliver_cputm", VCPU_STAT(deliver_cputm) }, | ||
81 | { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, | 85 | { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, |
82 | { "deliver_external_call", VCPU_STAT(deliver_external_call) }, | 86 | { "deliver_external_call", VCPU_STAT(deliver_external_call) }, |
83 | { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, | 87 | { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, |
84 | { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) }, | 88 | { "deliver_virtio", VCPU_STAT(deliver_virtio) }, |
85 | { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) }, | 89 | { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) }, |
86 | { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, | 90 | { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, |
87 | { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, | 91 | { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, |
88 | { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, | 92 | { "deliver_program", VCPU_STAT(deliver_program) }, |
89 | { "deliver_io_interrupt", VCPU_STAT(deliver_io_int) }, | 93 | { "deliver_io", VCPU_STAT(deliver_io) }, |
94 | { "deliver_machine_check", VCPU_STAT(deliver_machine_check) }, | ||
90 | { "exit_wait_state", VCPU_STAT(exit_wait_state) }, | 95 | { "exit_wait_state", VCPU_STAT(exit_wait_state) }, |
96 | { "inject_ckc", VCPU_STAT(inject_ckc) }, | ||
97 | { "inject_cputm", VCPU_STAT(inject_cputm) }, | ||
98 | { "inject_external_call", VCPU_STAT(inject_external_call) }, | ||
99 | { "inject_float_mchk", VM_STAT(inject_float_mchk) }, | ||
100 | { "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) }, | ||
101 | { "inject_io", VM_STAT(inject_io) }, | ||
102 | { "inject_mchk", VCPU_STAT(inject_mchk) }, | ||
103 | { "inject_pfault_done", VM_STAT(inject_pfault_done) }, | ||
104 | { "inject_program", VCPU_STAT(inject_program) }, | ||
105 | { "inject_restart", VCPU_STAT(inject_restart) }, | ||
106 | { "inject_service_signal", VM_STAT(inject_service_signal) }, | ||
107 | { "inject_set_prefix", VCPU_STAT(inject_set_prefix) }, | ||
108 | { "inject_stop_signal", VCPU_STAT(inject_stop_signal) }, | ||
109 | { "inject_pfault_init", VCPU_STAT(inject_pfault_init) }, | ||
110 | { "inject_virtio", VM_STAT(inject_virtio) }, | ||
91 | { "instruction_epsw", VCPU_STAT(instruction_epsw) }, | 111 | { "instruction_epsw", VCPU_STAT(instruction_epsw) }, |
92 | { "instruction_gs", VCPU_STAT(instruction_gs) }, | 112 | { "instruction_gs", VCPU_STAT(instruction_gs) }, |
93 | { "instruction_io_other", VCPU_STAT(instruction_io_other) }, | 113 | { "instruction_io_other", VCPU_STAT(instruction_io_other) }, |
@@ -152,13 +172,33 @@ static int nested; | |||
152 | module_param(nested, int, S_IRUGO); | 172 | module_param(nested, int, S_IRUGO); |
153 | MODULE_PARM_DESC(nested, "Nested virtualization support"); | 173 | MODULE_PARM_DESC(nested, "Nested virtualization support"); |
154 | 174 | ||
155 | /* upper facilities limit for kvm */ | ||
156 | unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM }; | ||
157 | 175 | ||
158 | unsigned long kvm_s390_fac_list_mask_size(void) | 176 | /* |
177 | * For now we handle at most 16 double words as this is what the s390 base | ||
178 | * kernel handles and stores in the prefix page. If we ever need to go beyond | ||
179 | * this, this requires changes to code, but the external uapi can stay. | ||
180 | */ | ||
181 | #define SIZE_INTERNAL 16 | ||
182 | |||
183 | /* | ||
184 | * Base feature mask that defines default mask for facilities. Consists of the | ||
185 | * defines in FACILITIES_KVM and the non-hypervisor managed bits. | ||
186 | */ | ||
187 | static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM }; | ||
188 | /* | ||
189 | * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL | ||
190 | * and defines the facilities that can be enabled via a cpu model. | ||
191 | */ | ||
192 | static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL }; | ||
193 | |||
194 | static unsigned long kvm_s390_fac_size(void) | ||
159 | { | 195 | { |
160 | BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64); | 196 | BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64); |
161 | return ARRAY_SIZE(kvm_s390_fac_list_mask); | 197 | BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64); |
198 | BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) > | ||
199 | sizeof(S390_lowcore.stfle_fac_list)); | ||
200 | |||
201 | return SIZE_INTERNAL; | ||
162 | } | 202 | } |
163 | 203 | ||
164 | /* available cpu features supported by kvm */ | 204 | /* available cpu features supported by kvm */ |
@@ -679,6 +719,8 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att | |||
679 | mutex_lock(&kvm->lock); | 719 | mutex_lock(&kvm->lock); |
680 | if (!kvm->created_vcpus) { | 720 | if (!kvm->created_vcpus) { |
681 | kvm->arch.use_cmma = 1; | 721 | kvm->arch.use_cmma = 1; |
722 | /* Not compatible with cmma. */ | ||
723 | kvm->arch.use_pfmfi = 0; | ||
682 | ret = 0; | 724 | ret = 0; |
683 | } | 725 | } |
684 | mutex_unlock(&kvm->lock); | 726 | mutex_unlock(&kvm->lock); |
@@ -1583,7 +1625,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm, | |||
1583 | return -EINVAL; | 1625 | return -EINVAL; |
1584 | /* CMMA is disabled or was not used, or the buffer has length zero */ | 1626 | /* CMMA is disabled or was not used, or the buffer has length zero */ |
1585 | bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); | 1627 | bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); |
1586 | if (!bufsize || !kvm->mm->context.use_cmma) { | 1628 | if (!bufsize || !kvm->mm->context.uses_cmm) { |
1587 | memset(args, 0, sizeof(*args)); | 1629 | memset(args, 0, sizeof(*args)); |
1588 | return 0; | 1630 | return 0; |
1589 | } | 1631 | } |
@@ -1660,7 +1702,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm, | |||
1660 | /* | 1702 | /* |
1661 | * This function sets the CMMA attributes for the given pages. If the input | 1703 | * This function sets the CMMA attributes for the given pages. If the input |
1662 | * buffer has zero length, no action is taken, otherwise the attributes are | 1704 | * buffer has zero length, no action is taken, otherwise the attributes are |
1663 | * set and the mm->context.use_cmma flag is set. | 1705 | * set and the mm->context.uses_cmm flag is set. |
1664 | */ | 1706 | */ |
1665 | static int kvm_s390_set_cmma_bits(struct kvm *kvm, | 1707 | static int kvm_s390_set_cmma_bits(struct kvm *kvm, |
1666 | const struct kvm_s390_cmma_log *args) | 1708 | const struct kvm_s390_cmma_log *args) |
@@ -1710,9 +1752,9 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm, | |||
1710 | srcu_read_unlock(&kvm->srcu, srcu_idx); | 1752 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
1711 | up_read(&kvm->mm->mmap_sem); | 1753 | up_read(&kvm->mm->mmap_sem); |
1712 | 1754 | ||
1713 | if (!kvm->mm->context.use_cmma) { | 1755 | if (!kvm->mm->context.uses_cmm) { |
1714 | down_write(&kvm->mm->mmap_sem); | 1756 | down_write(&kvm->mm->mmap_sem); |
1715 | kvm->mm->context.use_cmma = 1; | 1757 | kvm->mm->context.uses_cmm = 1; |
1716 | up_write(&kvm->mm->mmap_sem); | 1758 | up_write(&kvm->mm->mmap_sem); |
1717 | } | 1759 | } |
1718 | out: | 1760 | out: |
@@ -1967,20 +2009,15 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
1967 | if (!kvm->arch.sie_page2) | 2009 | if (!kvm->arch.sie_page2) |
1968 | goto out_err; | 2010 | goto out_err; |
1969 | 2011 | ||
1970 | /* Populate the facility mask initially. */ | ||
1971 | memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list, | ||
1972 | sizeof(S390_lowcore.stfle_fac_list)); | ||
1973 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { | ||
1974 | if (i < kvm_s390_fac_list_mask_size()) | ||
1975 | kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i]; | ||
1976 | else | ||
1977 | kvm->arch.model.fac_mask[i] = 0UL; | ||
1978 | } | ||
1979 | |||
1980 | /* Populate the facility list initially. */ | ||
1981 | kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; | 2012 | kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; |
1982 | memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask, | 2013 | |
1983 | S390_ARCH_FAC_LIST_SIZE_BYTE); | 2014 | for (i = 0; i < kvm_s390_fac_size(); i++) { |
2015 | kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] & | ||
2016 | (kvm_s390_fac_base[i] | | ||
2017 | kvm_s390_fac_ext[i]); | ||
2018 | kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] & | ||
2019 | kvm_s390_fac_base[i]; | ||
2020 | } | ||
1984 | 2021 | ||
1985 | /* we are always in czam mode - even on pre z14 machines */ | 2022 | /* we are always in czam mode - even on pre z14 machines */ |
1986 | set_kvm_facility(kvm->arch.model.fac_mask, 138); | 2023 | set_kvm_facility(kvm->arch.model.fac_mask, 138); |
@@ -2028,6 +2065,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
2028 | 2065 | ||
2029 | kvm->arch.css_support = 0; | 2066 | kvm->arch.css_support = 0; |
2030 | kvm->arch.use_irqchip = 0; | 2067 | kvm->arch.use_irqchip = 0; |
2068 | kvm->arch.use_pfmfi = sclp.has_pfmfi; | ||
2031 | kvm->arch.epoch = 0; | 2069 | kvm->arch.epoch = 0; |
2032 | 2070 | ||
2033 | spin_lock_init(&kvm->arch.start_stop_lock); | 2071 | spin_lock_init(&kvm->arch.start_stop_lock); |
@@ -2454,8 +2492,6 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) | |||
2454 | vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); | 2492 | vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); |
2455 | if (!vcpu->arch.sie_block->cbrlo) | 2493 | if (!vcpu->arch.sie_block->cbrlo) |
2456 | return -ENOMEM; | 2494 | return -ENOMEM; |
2457 | |||
2458 | vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI; | ||
2459 | return 0; | 2495 | return 0; |
2460 | } | 2496 | } |
2461 | 2497 | ||
@@ -2491,7 +2527,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
2491 | if (test_kvm_facility(vcpu->kvm, 73)) | 2527 | if (test_kvm_facility(vcpu->kvm, 73)) |
2492 | vcpu->arch.sie_block->ecb |= ECB_TE; | 2528 | vcpu->arch.sie_block->ecb |= ECB_TE; |
2493 | 2529 | ||
2494 | if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi) | 2530 | if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) |
2495 | vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; | 2531 | vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; |
2496 | if (test_kvm_facility(vcpu->kvm, 130)) | 2532 | if (test_kvm_facility(vcpu->kvm, 130)) |
2497 | vcpu->arch.sie_block->ecb2 |= ECB2_IEP; | 2533 | vcpu->arch.sie_block->ecb2 |= ECB2_IEP; |
@@ -3023,7 +3059,7 @@ retry: | |||
3023 | 3059 | ||
3024 | if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) { | 3060 | if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) { |
3025 | /* | 3061 | /* |
3026 | * Disable CMMA virtualization; we will emulate the ESSA | 3062 | * Disable CMM virtualization; we will emulate the ESSA |
3027 | * instruction manually, in order to provide additional | 3063 | * instruction manually, in order to provide additional |
3028 | * functionalities needed for live migration. | 3064 | * functionalities needed for live migration. |
3029 | */ | 3065 | */ |
@@ -3033,11 +3069,11 @@ retry: | |||
3033 | 3069 | ||
3034 | if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) { | 3070 | if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) { |
3035 | /* | 3071 | /* |
3036 | * Re-enable CMMA virtualization if CMMA is available and | 3072 | * Re-enable CMM virtualization if CMMA is available and |
3037 | * was used. | 3073 | * CMM has been used. |
3038 | */ | 3074 | */ |
3039 | if ((vcpu->kvm->arch.use_cmma) && | 3075 | if ((vcpu->kvm->arch.use_cmma) && |
3040 | (vcpu->kvm->mm->context.use_cmma)) | 3076 | (vcpu->kvm->mm->context.uses_cmm)) |
3041 | vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; | 3077 | vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; |
3042 | goto retry; | 3078 | goto retry; |
3043 | } | 3079 | } |
@@ -4044,7 +4080,7 @@ static int __init kvm_s390_init(void) | |||
4044 | } | 4080 | } |
4045 | 4081 | ||
4046 | for (i = 0; i < 16; i++) | 4082 | for (i = 0; i < 16; i++) |
4047 | kvm_s390_fac_list_mask[i] |= | 4083 | kvm_s390_fac_base[i] |= |
4048 | S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i); | 4084 | S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i); |
4049 | 4085 | ||
4050 | return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | 4086 | return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index f55ac0ef99ea..1b5621f4fe5b 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -294,8 +294,6 @@ void exit_sie(struct kvm_vcpu *vcpu); | |||
294 | void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu); | 294 | void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu); |
295 | int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); | 295 | int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); |
296 | void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); | 296 | void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); |
297 | unsigned long kvm_s390_fac_list_mask_size(void); | ||
298 | extern unsigned long kvm_s390_fac_list_mask[]; | ||
299 | void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm); | 297 | void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm); |
300 | __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu); | 298 | __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu); |
301 | 299 | ||
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index f0b4185158af..ebfa0442e569 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -1078,9 +1078,9 @@ static int handle_essa(struct kvm_vcpu *vcpu) | |||
1078 | * value really needs to be written to; if the value is | 1078 | * value really needs to be written to; if the value is |
1079 | * already correct, we do nothing and avoid the lock. | 1079 | * already correct, we do nothing and avoid the lock. |
1080 | */ | 1080 | */ |
1081 | if (vcpu->kvm->mm->context.use_cmma == 0) { | 1081 | if (vcpu->kvm->mm->context.uses_cmm == 0) { |
1082 | down_write(&vcpu->kvm->mm->mmap_sem); | 1082 | down_write(&vcpu->kvm->mm->mmap_sem); |
1083 | vcpu->kvm->mm->context.use_cmma = 1; | 1083 | vcpu->kvm->mm->context.uses_cmm = 1; |
1084 | up_write(&vcpu->kvm->mm->mmap_sem); | 1084 | up_write(&vcpu->kvm->mm->mmap_sem); |
1085 | } | 1085 | } |
1086 | /* | 1086 | /* |
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c index 424a1ba4f874..90a8c9e84ca6 100644 --- a/arch/s390/tools/gen_facilities.c +++ b/arch/s390/tools/gen_facilities.c | |||
@@ -62,6 +62,13 @@ static struct facility_def facility_defs[] = { | |||
62 | } | 62 | } |
63 | }, | 63 | }, |
64 | { | 64 | { |
65 | /* | ||
66 | * FACILITIES_KVM contains the list of facilities that are part | ||
67 | * of the default facility mask and list that are passed to the | ||
68 | * initial CPU model. If no CPU model is used, this, together | ||
69 | * with the non-hypervisor managed bits, is the maximum list of | ||
70 | * guest facilities supported by KVM. | ||
71 | */ | ||
65 | .name = "FACILITIES_KVM", | 72 | .name = "FACILITIES_KVM", |
66 | .bits = (int[]){ | 73 | .bits = (int[]){ |
67 | 0, /* N3 instructions */ | 74 | 0, /* N3 instructions */ |
@@ -89,6 +96,19 @@ static struct facility_def facility_defs[] = { | |||
89 | -1 /* END */ | 96 | -1 /* END */ |
90 | } | 97 | } |
91 | }, | 98 | }, |
99 | { | ||
100 | /* | ||
101 | * FACILITIES_KVM_CPUMODEL contains the list of facilities | ||
102 | * that can be enabled by CPU model code if the host supports | ||
103 | * it. These facilities are not passed to the guest without | ||
104 | * CPU model support. | ||
105 | */ | ||
106 | |||
107 | .name = "FACILITIES_KVM_CPUMODEL", | ||
108 | .bits = (int[]){ | ||
109 | -1 /* END */ | ||
110 | } | ||
111 | }, | ||
92 | }; | 112 | }; |
93 | 113 | ||
94 | static void print_facility_list(struct facility_def *def) | 114 | static void print_facility_list(struct facility_def *def) |