author    Paolo Bonzini <pbonzini@redhat.com>  2015-10-13 10:44:51 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2015-10-13 10:44:51 -0400
commit    1330a0170a48ad3788eff01aaf889203652ab4c7 (patch)
tree      1bc4d8dd19fe7edfce0efccc673226d346c89a7f
parent    b7d2063177a584cb1afb06fc0ed6c48b576f3e75 (diff)
parent    60417fcc2b0235dfe3dcd589c56dbe3ea1a64c54 (diff)
Merge tag 'kvm-s390-next-20151013' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes for 4.4

A bunch of fixes and optimizations for interrupt and time handling. No
fix is important enough to qualify for 4.3 or stable.
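
The common thread in the time-handling changes below is that every
open-coded guest TOD read is funneled through a single preempt-safe
helper. As a before/after sketch (both halves taken verbatim from the
hunks that follow; the per-vcpu sie_block->epoch mirrors
kvm->arch.epoch, which kvm_s390_set_tod_clock() keeps in sync):

	/* Before: each caller open-coded the preempt-safe TOD read. */
	preempt_disable();
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	preempt_enable();

	/*
	 * After: one inline helper in kvm-s390.h encapsulates the
	 * sequence, and callers shrink to a single line:
	 *	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	 */
	static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
	{
		u64 rc;

		preempt_disable();
		rc = get_tod_clock_fast() + kvm->arch.epoch;
		preempt_enable();
		return rc;
	}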
-rw-r--r--  arch/s390/kvm/interrupt.c | 116
-rw-r--r--  arch/s390/kvm/kvm-s390.c  |  46
-rw-r--r--  arch/s390/kvm/kvm-s390.h  |  35
-rw-r--r--  arch/s390/kvm/priv.c      |  19
4 files changed, 97 insertions(+), 119 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5c2c169395c3..373e32346d68 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -51,11 +51,9 @@ static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
 
 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 {
-	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
-	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
-	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
-		return 0;
-	return 1;
+	return psw_extint_disabled(vcpu) &&
+	       psw_ioint_disabled(vcpu) &&
+	       psw_mchk_disabled(vcpu);
 }
 
 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
@@ -71,13 +69,8 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-	preempt_disable();
-	if (!(vcpu->arch.sie_block->ckc <
-	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
-		preempt_enable();
+	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
 		return 0;
-	}
-	preempt_enable();
 	return ckc_interrupts_enabled(vcpu);
 }
 
@@ -109,14 +102,10 @@ static inline u8 int_word_to_isc(u32 int_word)
 	return (int_word & 0x38000000) >> 27;
 }
 
-static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
+static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
 {
-	return vcpu->kvm->arch.float_int.pending_irqs;
-}
-
-static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.local_int.pending_irqs;
+	return vcpu->kvm->arch.float_int.pending_irqs |
+	       vcpu->arch.local_int.pending_irqs;
 }
 
 static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
@@ -135,8 +124,7 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 {
 	unsigned long active_mask;
 
-	active_mask = pending_local_irqs(vcpu);
-	active_mask |= pending_floating_irqs(vcpu);
+	active_mask = pending_irqs(vcpu);
 	if (!active_mask)
 		return 0;
 
@@ -204,7 +192,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
+	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
 		return;
 	else if (psw_ioint_disabled(vcpu))
 		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
@@ -214,7 +202,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 
 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
 		return;
 	if (psw_extint_disabled(vcpu))
 		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
@@ -224,7 +212,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
 
 static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
 		return;
 	if (psw_mchk_disabled(vcpu))
 		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -815,23 +803,21 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 
 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
 {
-	int rc;
+	if (deliverable_irqs(vcpu))
+		return 1;
 
-	rc = !!deliverable_irqs(vcpu);
-
-	if (!rc && kvm_cpu_has_pending_timer(vcpu))
-		rc = 1;
+	if (kvm_cpu_has_pending_timer(vcpu))
+		return 1;
 
 	/* external call pending and deliverable */
-	if (!rc && kvm_s390_ext_call_pending(vcpu) &&
+	if (kvm_s390_ext_call_pending(vcpu) &&
 	    !psw_extint_disabled(vcpu) &&
 	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
-		rc = 1;
-
-	if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
-		rc = 1;
+		return 1;
 
-	return rc;
+	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
+		return 1;
+	return 0;
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -846,7 +832,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	vcpu->stat.exit_wait_state++;
 
 	/* fast path */
-	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+	if (kvm_arch_vcpu_runnable(vcpu))
 		return 0;
 
 	if (psw_interrupts_disabled(vcpu)) {
@@ -860,9 +846,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
 
-	preempt_disable();
-	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-	preempt_enable();
+	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/* underflow */
@@ -901,9 +885,7 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 	u64 now, sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	preempt_disable();
-	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-	preempt_enable();
+	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/*
@@ -981,39 +963,30 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
 				   irq->u.pgm.code, 0);
 
-	li->irq.pgm = irq->u.pgm;
+	if (irq->u.pgm.code == PGM_PER) {
+		li->irq.pgm.code |= PGM_PER;
+		/* only modify PER related information */
+		li->irq.pgm.per_address = irq->u.pgm.per_address;
+		li->irq.pgm.per_code = irq->u.pgm.per_code;
+		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
+		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
+	} else if (!(irq->u.pgm.code & PGM_PER)) {
+		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
+				   irq->u.pgm.code;
+		/* only modify non-PER information */
+		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
+		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
+		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
+		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
+		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
+		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
+	} else {
+		li->irq.pgm = irq->u.pgm;
+	}
 	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
 	return 0;
 }
 
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
-{
-	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_irq irq;
-
-	spin_lock(&li->lock);
-	irq.u.pgm.code = code;
-	__inject_prog(vcpu, &irq);
-	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock(&li->lock);
-	return 0;
-}
-
-int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
-			     struct kvm_s390_pgm_info *pgm_info)
-{
-	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_irq irq;
-	int rc;
-
-	spin_lock(&li->lock);
-	irq.u.pgm = *pgm_info;
-	rc = __inject_prog(vcpu, &irq);
-	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock(&li->lock);
-	return rc;
-}
-
 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1390,12 +1363,9 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 
 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 {
-	struct kvm_s390_float_interrupt *fi;
 	u64 type = READ_ONCE(inti->type);
 	int rc;
 
-	fi = &kvm->arch.float_int;
-
 	switch (type) {
 	case KVM_S390_MCHK:
 		rc = __inject_float_mchk(kvm, inti);
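
A note on the __inject_prog() change above: a PER event and another
program interrupt can be pending for a vcpu at the same time, and the
old blanket copy (li->irq.pgm = irq->u.pgm) let whichever injection
came last clobber the other. The new code merges instead, keyed off the
PGM_PER bit in the interrupt code; summarized as a comment (an
interpretation of the three branches, not text from the patch):

	/*
	 * code == PGM_PER         -> touch only the PER fields and OR
	 *                            PGM_PER into the pending code
	 * code lacks PGM_PER bit  -> touch only the non-PER fields,
	 *                            preserving a pending PGM_PER bit
	 * otherwise (code already carries PGM_PER) -> plain copy
	 */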
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0a67c40eece9..618c85411a51 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -521,27 +521,12 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	struct kvm_vcpu *cur_vcpu;
-	unsigned int vcpu_idx;
-	u64 host_tod, gtod;
-	int r;
+	u64 gtod;
 
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	r = store_tod_clock(&host_tod);
-	if (r)
-		return r;
-
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = gtod - host_tod;
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
-		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
+	kvm_s390_set_tod_clock(kvm, gtod);
 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
 	return 0;
 }
@@ -581,16 +566,9 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 host_tod, gtod;
-	int r;
+	u64 gtod;
 
-	r = store_tod_clock(&host_tod);
-	if (r)
-		return r;
-
-	preempt_disable();
-	gtod = host_tod + kvm->arch.epoch;
-	preempt_enable();
+	gtod = kvm_s390_get_tod_clock_fast(kvm);
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
@@ -1916,6 +1894,22 @@ retry:
 	return 0;
 }
 
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	mutex_lock(&kvm->lock);
+	preempt_disable();
+	kvm->arch.epoch = tod - get_tod_clock();
+	kvm_s390_vcpu_block_all(kvm);
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+	kvm_s390_vcpu_unblock_all(kvm);
+	preempt_enable();
+	mutex_unlock(&kvm->lock);
+}
+
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c446aabf60d3..1e70e00d3c5e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -175,6 +175,7 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
 	return kvm->arch.user_cpu_state_ctrl != 0;
 }
 
+/* implemented in interrupt.c */
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
@@ -185,7 +186,25 @@ int __must_check kvm_s390_inject_vm(struct kvm *kvm,
 				    struct kvm_s390_interrupt *s390int);
 int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 				      struct kvm_s390_irq *irq);
-int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
+					   struct kvm_s390_pgm_info *pgm_info)
+{
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_PROGRAM_INT,
+		.u.pgm = *pgm_info,
+	};
+
+	return kvm_s390_inject_vcpu(vcpu, &irq);
+}
+static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_PROGRAM_INT,
+		.u.pgm.code = code,
+	};
+
+	return kvm_s390_inject_vcpu(vcpu, &irq);
+}
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 						    u64 isc_mask, u32 schid);
 int kvm_s390_reinject_io_int(struct kvm *kvm,
@@ -212,6 +231,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
@@ -231,9 +251,6 @@ extern unsigned long kvm_s390_fac_list_mask[];
 
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
-/* implemented in interrupt.c */
-int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
-			     struct kvm_s390_pgm_info *pgm_info);
 
 static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
 {
@@ -254,6 +271,16 @@ static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
 		kvm_s390_vcpu_unblock(vcpu);
 }
 
+static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
+{
+	u64 rc;
+
+	preempt_disable();
+	rc = get_tod_clock_fast() + kvm->arch.epoch;
+	preempt_enable();
+	return rc;
+}
+
 /**
  * kvm_s390_inject_prog_cond - conditionally inject a program check
  * @vcpu: virtual cpu
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 4d21dc4d1a84..77191b85ea7a 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -33,11 +33,9 @@
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *cpup;
-	s64 hostclk, val;
-	int i, rc;
+	int rc;
 	ar_t ar;
-	u64 op2;
+	u64 op2, val;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -49,19 +47,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	if (store_tod_clock(&hostclk)) {
-		kvm_s390_set_psw_cc(vcpu, 3);
-		return 0;
-	}
 	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-	val = (val - hostclk) & ~0x3fUL;
-
-	mutex_lock(&vcpu->kvm->lock);
-	preempt_disable();
-	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
-		cpup->arch.sie_block->epoch = val;
-	preempt_enable();
-	mutex_unlock(&vcpu->kvm->lock);
+	kvm_s390_set_tod_clock(vcpu->kvm, val);
 
 	kvm_s390_set_psw_cc(vcpu, 0);
 	return 0;
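
The injection cleanup reads the same way end to end:
kvm_s390_inject_program_int() and kvm_s390_inject_prog_irq() move out
of interrupt.c and become static inlines in kvm-s390.h that build a
struct kvm_s390_irq and dispatch through kvm_s390_inject_vcpu(), so
every program interrupt now takes the same locked __inject_prog() path.
A minimal caller sketch (the call site is the real one from priv.c
above; the expansion is illustrative):

	/* handle_set_clock() rejects problem-state guests via the inline: */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* ...which now expands to roughly: */
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = PGM_PRIVILEGED_OP,
	};
	return kvm_s390_inject_vcpu(vcpu, &irq);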