Diffstat (limited to 'arch/s390')
 arch/s390/kvm/interrupt.c | 24 +++++++++++++++---------
 arch/s390/kvm/kvm-s390.h  |  1 +
 arch/s390/kvm/sigp.c      | 20 ++++++--------------
 3 files changed, 22 insertions(+), 23 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 86575b4cdc1c..65396e14ff05 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -617,12 +617,22 @@ no_timer:
 	return 0;
 }
 
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
+{
+	if (waitqueue_active(&vcpu->wq)) {
+		/*
+		 * The vcpu gave up the cpu voluntarily, mark it as a good
+		 * yield-candidate.
+		 */
+		vcpu->preempted = true;
+		wake_up_interruptible(&vcpu->wq);
+	}
+}
+
 void kvm_s390_tasklet(unsigned long parm)
 {
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
-
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
+	kvm_s390_vcpu_wakeup(vcpu);
 }
 
 /*
@@ -905,10 +915,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	li = &dst_vcpu->arch.local_int;
 	spin_lock(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
 	spin_unlock(&li->lock);
+	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
 	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
@@ -1059,11 +1067,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	if (inti->type == KVM_S390_SIGP_STOP)
 		li->action_bits |= ACTION_STOP_ON_STOP;
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
-	vcpu->preempted = true;
 	spin_unlock(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
+	kvm_s390_vcpu_wakeup(vcpu);
 	return 0;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 33a0e4bed2a5..665eaccb9ca5 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -136,6 +136,7 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
 }
 
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
 void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 946992f7bb25..c6f1c2bc9753 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
+static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
 {
+	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
 	struct kvm_s390_interrupt_info *inti;
 	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
@@ -151,8 +152,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 	atomic_set(&li->active, 1);
 	li->action_bits |= action;
 	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
 out:
 	spin_unlock(&li->lock);
 
@@ -161,7 +161,6 @@ out:
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 {
-	struct kvm_s390_local_interrupt *li;
 	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
@@ -171,9 +170,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
-	li = &dst_vcpu->arch.local_int;
 
-	rc = __inject_sigp_stop(li, action);
+	rc = __inject_sigp_stop(dst_vcpu, action);
 
 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 
@@ -258,8 +256,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
 	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
@@ -466,12 +463,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
 	dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	BUG_ON(dest_vcpu == NULL);
 
-	spin_lock(&dest_vcpu->arch.local_int.lock);
-	if (waitqueue_active(&dest_vcpu->wq))
-		wake_up_interruptible(&dest_vcpu->wq);
-	dest_vcpu->preempted = true;
-	spin_unlock(&dest_vcpu->arch.local_int.lock);
-
+	kvm_s390_vcpu_wakeup(dest_vcpu);
 	kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
 	return 0;
 }