author     Pierre Morel <pmorel@linux.ibm.com>             2018-10-05 04:31:09 -0400
committer  Christian Borntraeger <borntraeger@de.ibm.com>  2018-10-05 07:10:18 -0400
commit     0e237e44699465139c07f969b051f83066a2ec1d (patch)
tree       b071184641074850e2e8eeb980af4e686797f287
parent     8e41bd54317b04f2bf03012a4ca8ab7360c9beef (diff)
KVM: s390: Tracing APCB changes
kvm_arch_crypto_set_masks is a new function that centralizes the setup
of the APCB masks inside the CRYCB SIE satellite.
To trace APCB mask changes, we add VM_EVENT() tracing to
both kvm_arch_crypto_set_masks and kvm_arch_crypto_clear_masks.
Signed-off-by: Pierre Morel <pmorel@linux.ibm.com>
Message-Id: <1538728270-10340-2-git-send-email-pmorel@linux.ibm.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
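For context, kvm_arch_crypto_set_masks is the interface a device driver uses to plug AP adapter, usage-domain and control-domain masks into a guest's CRYCB; the AP passthrough (vfio_ap) work is the intended consumer. The sketch below is not part of this commit: the caller, the helper name and the example adapter/domain numbers are hypothetical and only illustrate the calling convention (256-bit bitmaps passed as unsigned long arrays, with AP numbering starting at the leftmost bit, which the s390 set_bit_inv() helper provides).

/*
 * Hypothetical caller sketch, not part of this commit: how a driver such
 * as vfio_ap might hand AP resources to a guest via the new interface.
 * The function name and the adapter/domain numbers are illustrative only.
 */
#include <linux/bitops.h>
#include <linux/kvm_host.h>

static void example_assign_ap_resources(struct kvm *kvm)
{
	/* APCB format-2 masks are 256 bits wide (see CRYCB_FORMAT2 below) */
	DECLARE_BITMAP(apm, 256) = { 0 };	/* AP adapter mask */
	DECLARE_BITMAP(aqm, 256) = { 0 };	/* AP usage-domain (queue) mask */
	DECLARE_BITMAP(adm, 256) = { 0 };	/* AP control-domain mask */

	/* AP numbering starts at the leftmost bit, hence the _inv helpers */
	set_bit_inv(3, apm);	/* give the guest adapter 3 ... */
	set_bit_inv(5, aqm);	/* ... with usage domain 5 ... */
	set_bit_inv(5, adm);	/* ... which is also a control domain */

	/* install the masks in the CRYCB; this emits the new VM_EVENT traces */
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
}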
-rw-r--r--  arch/s390/include/asm/kvm_host.h |  2
-rw-r--r--  arch/s390/kvm/kvm-s390.c         | 41
2 files changed, 43 insertions, 0 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 36d35313e840..22aa4da91f7a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -861,6 +861,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				 struct kvm_async_pf *work);
 
 void kvm_arch_crypto_clear_masks(struct kvm *kvm);
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+			       unsigned long *aqm, unsigned long *adm);
 
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 734d87d88eb3..22a320a9a00d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2064,6 +2064,46 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm)
 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
 
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+			       unsigned long *aqm, unsigned long *adm)
+{
+	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
+
+	mutex_lock(&kvm->lock);
+	kvm_s390_vcpu_block_all(kvm);
+
+	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
+	case CRYCB_FORMAT2: /* APCB1 use 256 bits */
+		memcpy(crycb->apcb1.apm, apm, 32);
+		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
+			 apm[0], apm[1], apm[2], apm[3]);
+		memcpy(crycb->apcb1.aqm, aqm, 32);
+		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
+			 aqm[0], aqm[1], aqm[2], aqm[3]);
+		memcpy(crycb->apcb1.adm, adm, 32);
+		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
+			 adm[0], adm[1], adm[2], adm[3]);
+		break;
+	case CRYCB_FORMAT1:
+	case CRYCB_FORMAT0: /* Fall through both use APCB0 */
+		memcpy(crycb->apcb0.apm, apm, 8);
+		memcpy(crycb->apcb0.aqm, aqm, 2);
+		memcpy(crycb->apcb0.adm, adm, 2);
+		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
+			 apm[0], *((unsigned short *)aqm),
+			 *((unsigned short *)adm));
+		break;
+	default:	/* Can not happen */
+		break;
+	}
+
+	/* recreate the shadow crycb for each vcpu */
+	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+	kvm_s390_vcpu_unblock_all(kvm);
+	mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
+
 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
 {
 	mutex_lock(&kvm->lock);
@@ -2074,6 +2114,7 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm)
 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
 	       sizeof(kvm->arch.crypto.crycb->apcb1));
 
+	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
 	/* recreate the shadow crycb for each vcpu */
 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
 	kvm_s390_vcpu_unblock_all(kvm);