-rw-r--r--   arch/s390/kvm/kvm-s390.c   46
-rw-r--r--   arch/s390/kvm/kvm-s390.h    5
-rw-r--r--   arch/s390/kvm/priv.c        9
3 files changed, 22 insertions, 38 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b07aa16dcf06..77d7818130db 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -928,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_set_tod_clock_ext(kvm, &gtod);
-	else if (gtod.epoch_idx == 0)
-		kvm_s390_set_tod_clock(kvm, gtod.tod);
-	else
+	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
+	kvm_s390_set_tod_clock(kvm, &gtod);
 
 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		 gtod.epoch_idx, gtod.tod);
@@ -958,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 gtod;
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+			   sizeof(gtod.tod)))
 		return -EFAULT;
 
-	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+	kvm_s390_set_tod_clock(kvm, &gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }
 
@@ -3048,8 +3046,8 @@ retry:
 	return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
@@ -3061,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);
 
 	kvm->arch.epoch = gtod->tod - htod.tod;
-	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-	if (kvm->arch.epoch > gtod->tod)
-		kvm->arch.epdx -= 1;
+	kvm->arch.epdx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+		if (kvm->arch.epoch > gtod->tod)
+			kvm->arch.epdx -= 1;
+	}
 
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -3077,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = tod - get_tod_clock();
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 3c0a975c2477..f55ac0ef99ea 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -281,9 +281,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index a74578cdd3f3..f0b4185158af 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -85,9 +85,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 	int rc;
 	u8 ar;
-	u64 op2, val;
+	u64 op2;
 
 	vcpu->stat.instruction_sck++;
 
@@ -97,12 +98,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (op2 & 7)	/* Operand must be on a doubleword boundary */
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-	kvm_s390_set_tod_clock(vcpu->kvm, val);
+	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
 
 	kvm_s390_set_psw_cc(vcpu, 0);
 	return 0;
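
With this change, both the SCK intercept handler and the VM device-attribute paths end up in the single kvm_s390_set_tod_clock(), which takes a struct kvm_s390_vm_tod_clock and only touches the epoch index when facility 139 is available. As a rough illustration of how the kernel side above is driven from user space, here is a minimal sketch of a VMM setting the extended guest TOD; the KVM_S390_VM_TOD / KVM_S390_VM_TOD_EXT attribute names and the UAPI struct layout are taken from the s390 KVM headers rather than from this diff, so treat them as assumptions.

/*
 * Hedged user-space sketch (not part of this patch): set the guest TOD,
 * including the epoch index, through the VM device attribute interface.
 * With the patch applied, a non-zero epoch_idx is rejected with -EINVAL
 * when facility 139 is unavailable; otherwise the request is handled by
 * the generalized kvm_s390_set_tod_clock() shown above.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* pulls in the s390 asm/kvm.h definitions */

static int set_guest_tod(int vm_fd, uint8_t epoch_idx, uint64_t tod)
{
	struct kvm_s390_vm_tod_clock gtod = {
		.epoch_idx = epoch_idx,	/* high 8 bits of the 72-bit epoch */
		.tod = tod,		/* low 64 bits of the guest TOD */
	};
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr = KVM_S390_VM_TOD_EXT,
		.addr = (uint64_t)&gtod,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}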
