author	Eugene (jno) Dvurechenski <jno@linux.vnet.ibm.com>	2015-04-21 09:10:10 -0400
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2015-11-30 06:47:06 -0500
commit	a5bd764734838da64b37d771e5b7814eb1f61ffd (patch)
tree	639cd5309b27a64a51a4ff66fc1184f8d9b42510 /arch/s390/kvm/interrupt.c
parent	605145103abb21c555d5982073bee29269aaad51 (diff)
KVM: s390: Generalize access to SIGP controls
This patch generalizes access to the SIGP controls, which are part of the SCA.
This is to prepare for the upcoming introduction of Extended SCA support.

Signed-off-by: Eugene (jno) Dvurechenski <jno@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r--	arch/s390/kvm/interrupt.c	72
1 file changed, 45 insertions(+), 27 deletions(-)
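The new sca_* helpers introduced below centralize how the per-vcpu SIGP control byte in the SCA is tested, set, and cleared, replacing the open-coded accesses in kvm_s390_ext_call_pending(), kvm_s390_clear_local_irqs(), __inject_extcall() and kvm_s390_get_irq_state(). As a rough standalone sketch (not kernel code), the snippet below mimics the compare-and-swap guard that sca_inject_ext_call() uses to refuse a second external call while one is already pending; the SIGP_CTRL_C/SIGP_CTRL_SCN_MASK values and the C11 atomics are stand-ins for the kernel's definitions and cmpxchg().

/*
 * Standalone illustration only -- not kernel code. The bit values below
 * stand in for the kernel's SIGP_CTRL_* definitions, and C11 atomics
 * stand in for cmpxchg().
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SIGP_CTRL_C        0x80	/* external call pending */
#define SIGP_CTRL_SCN_MASK 0x3f	/* source CPU number */

static int inject_ext_call(_Atomic uint8_t *sigp_ctrl, int src_id)
{
	uint8_t new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
	uint8_t old_val = atomic_load(sigp_ctrl) & ~SIGP_CTRL_C;

	/* Fails if another external call was marked pending in the meantime. */
	if (!atomic_compare_exchange_strong(sigp_ctrl, &old_val, new_val))
		return -EBUSY;
	return 0;
}

int main(void)
{
	_Atomic uint8_t ctrl = 0;

	printf("first inject:  %d\n", inject_ext_call(&ctrl, 3));	/* 0 */
	printf("second inject: %d\n", inject_ext_call(&ctrl, 5));	/* -EBUSY */
	return 0;
}

In the kernel, the same pattern additionally sets CPUSTAT_ECALL_PEND in the SIE control block (see atomic_or() in sca_inject_ext_call() below) so that the SIGP interpretation facility actually delivers the call.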
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 6a75352f453c..2a4718af9dcf 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -34,6 +34,45 @@
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
 
+/* handle external calls via sigp interpretation facility */
+static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
+{
+	struct sca_block *sca = vcpu->kvm->arch.sca;
+	uint8_t sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+	if (src_id)
+		*src_id = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+
+	return sigp_ctrl & SIGP_CTRL_C &&
+	       atomic_read(&vcpu->arch.sie_block->cpuflags) &
+			CPUSTAT_ECALL_PEND;
+}
+
+static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+{
+	struct sca_block *sca = vcpu->kvm->arch.sca;
+	uint8_t *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+	uint8_t new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
+	uint8_t old_val = *sigp_ctrl & ~SIGP_CTRL_C;
+
+	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
+		/* another external call is pending */
+		return -EBUSY;
+	}
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	return 0;
+}
+
+static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
+{
+	struct sca_block *sca = vcpu->kvm->arch.sca;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	uint8_t *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+	*sigp_ctrl = 0;
+}
+
 int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -792,13 +831,11 @@ static const deliver_irq_t deliver_irq_funcs[] = {
 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
 	if (!sclp.has_sigpif)
 		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 
-	return (sigp_ctrl & SIGP_CTRL_C) &&
-	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
+	return sca_ext_call_pending(vcpu, NULL);
 }
 
 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
@@ -909,9 +946,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	memset(&li->irq, 0, sizeof(li->irq));
 	spin_unlock(&li->lock);
 
-	/* clear pending external calls set by sigp interpretation facility */
-	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
-	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
+	sca_clear_ext_call(vcpu);
 }
 
 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
@@ -1003,21 +1038,6 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	return 0;
 }
 
-static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
-{
-	unsigned char new_val, old_val;
-	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
-
-	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
-	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
-	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
-		/* another external call is pending */
-		return -EBUSY;
-	}
-	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
-	return 0;
-}
-
 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1034,7 +1054,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 		return -EINVAL;
 
 	if (sclp.has_sigpif)
-		return __inject_extcall_sigpif(vcpu, src_id);
+		return sca_inject_ext_call(vcpu, src_id);
 
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
@@ -2203,7 +2223,7 @@ static void store_local_irq(struct kvm_s390_local_interrupt *li,
 
 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 {
-	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+	int scn;
 	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	unsigned long pending_irqs;
@@ -2243,14 +2263,12 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 		}
 	}
 
-	if ((sigp_ctrl & SIGP_CTRL_C) &&
-	    (atomic_read(&vcpu->arch.sie_block->cpuflags) &
-	     CPUSTAT_ECALL_PEND)) {
+	if (sca_ext_call_pending(vcpu, &scn)) {
 		if (n + sizeof(irq) > len)
 			return -ENOBUFS;
 		memset(&irq, 0, sizeof(irq));
 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
-		irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+		irq.u.extcall.code = scn;
 		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
 			return -EFAULT;
 		n += sizeof(irq);