Diffstat (limited to 'arch/s390/kvm/kvm-s390.c'):
 arch/s390/kvm/kvm-s390.c | 81
 1 file changed, 47 insertions(+), 34 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba4c7092335a..339ac0964590 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
86 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, 86 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
87 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, 87 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
88 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, 88 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
89 { "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
89 { "exit_wait_state", VCPU_STAT(exit_wait_state) }, 90 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
90 { "instruction_epsw", VCPU_STAT(instruction_epsw) }, 91 { "instruction_epsw", VCPU_STAT(instruction_epsw) },
91 { "instruction_gs", VCPU_STAT(instruction_gs) }, 92 { "instruction_gs", VCPU_STAT(instruction_gs) },
@@ -179,6 +180,28 @@ int kvm_arch_hardware_enable(void)
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 			      unsigned long end);
 
+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+	u8 delta_idx = 0;
+
+	/*
+	 * The TOD jumps by delta, we have to compensate this by adding
+	 * -delta to the epoch.
+	 */
+	delta = -delta;
+
+	/* sign-extension - we're adding to signed values below */
+	if ((s64)delta < 0)
+		delta_idx = -1;
+
+	scb->epoch += delta;
+	if (scb->ecd & ECD_MEF) {
+		scb->epdx += delta_idx;
+		if (scb->epoch < delta)
+			scb->epdx += 1;
+	}
+}
+
 /*
  * This callback is executed during stop_machine(). All CPUs are therefore
  * temporarily stopped. In order not to change guest behavior, we have to
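
The helper added above folds a signed 64-bit delta into the 128-bit guest epoch (epdx:epoch): the high byte first takes the sign extension of the delta, and an unsigned wrap of the low-word addition then carries one back in. A minimal stand-alone sketch of the same carry arithmetic, mirroring what kvm_clock_sync_scb() does when the multiple-epoch facility is active; the struct and names are illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>

struct epoch128 {
	int8_t hi;	/* high byte, like the SIE block's epdx */
	uint64_t lo;	/* low word, like the SIE block's epoch */
};

/* Add a signed 64-bit delta to a 128-bit value. */
static void add_delta(struct epoch128 *e, uint64_t delta)
{
	/* sign-extend the delta into the high byte */
	if ((int64_t)delta < 0)
		e->hi -= 1;
	e->lo += delta;
	/* unsigned wrap of the low word carries into the high byte */
	if (e->lo < delta)
		e->hi += 1;
}

int main(void)
{
	struct epoch128 e = { .hi = 0, .lo = 20 };

	add_delta(&e, (uint64_t)-30);	/* 20 - 30 borrows from the high byte */
	assert(e.hi == -1 && e.lo == (uint64_t)-10);
	return 0;
}
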
@@ -194,13 +217,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
 	unsigned long long *delta = v;
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		kvm->arch.epoch -= *delta;
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			vcpu->arch.sie_block->epoch -= *delta;
+			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+			if (i == 0) {
+				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+			}
 			if (vcpu->arch.cputm_enabled)
 				vcpu->arch.cputm_start += *delta;
 			if (vcpu->arch.vsie_block)
-				vcpu->arch.vsie_block->epoch -= *delta;
+				kvm_clock_sync_scb(vcpu->arch.vsie_block,
+						   *delta);
 		}
 	}
 	return NOTIFY_OK;
@@ -902,12 +929,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_set_tod_clock_ext(kvm, &gtod);
-	else if (gtod.epoch_idx == 0)
-		kvm_s390_set_tod_clock(kvm, gtod.tod);
-	else
+	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
+	kvm_s390_set_tod_clock(kvm, &gtod);
 
 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		 gtod.epoch_idx, gtod.tod);
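
From user space this path is reached through the VM device-attribute interface. A hedged sketch of a caller, assuming an open KVM VM file descriptor and the uapi definitions from <linux/kvm.h> (KVM_S390_VM_TOD / KVM_S390_VM_TOD_EXT and struct kvm_s390_vm_tod_clock); with the change above, only a non-zero epoch_idx without facility 139 is rejected:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: vm_fd is assumed to be an open KVM VM file descriptor. */
static int set_tod_ext(int vm_fd, uint8_t epoch_idx, uint64_t tod)
{
	struct kvm_s390_vm_tod_clock gtod = {
		.epoch_idx = epoch_idx,
		.tod = tod,
	};
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr = KVM_S390_VM_TOD_EXT,
		.addr = (uint64_t)&gtod,
	};

	/* Fails with EINVAL if epoch_idx != 0 and facility 139 is absent. */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
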
@@ -932,13 +956,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 gtod;
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+			   sizeof(gtod.tod)))
 		return -EFAULT;
 
-	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+	kvm_s390_set_tod_clock(kvm, &gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }
 
@@ -2122,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 		/* we still need the basic sca for the ipte control */
 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+		return;
 	}
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
@@ -2389,6 +2415,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	mutex_lock(&vcpu->kvm->lock);
 	preempt_disable();
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
 	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -3021,8 +3048,8 @@ retry:
 	return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
@@ -3034,10 +3061,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);
 
 	kvm->arch.epoch = gtod->tod - htod.tod;
-	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-	if (kvm->arch.epoch > gtod->tod)
-		kvm->arch.epdx -= 1;
+	kvm->arch.epdx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+		if (kvm->arch.epoch > gtod->tod)
+			kvm->arch.epdx -= 1;
+	}
 
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
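
The borrow detection above relies on unsigned wrap-around: for u64 values, a - b wraps exactly when b > a, and the wrapped result is then greater than a, so an epoch larger than gtod->tod means the low-word subtraction borrowed from the epoch index. A tiny stand-alone illustration (variable names are not the kernel's):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t guest_tod = 5, host_tod = 10;
	uint64_t epoch = guest_tod - host_tod;	/* wraps to 2^64 - 5 */
	int8_t epdx = 0;			/* guest_idx - host_idx */

	if (epoch > guest_tod)	/* wrap happened: borrow from the high word */
		epdx -= 1;
	assert(epdx == -1);
	return 0;
}
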
@@ -3050,22 +3079,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = tod - get_tod_clock();
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu