Diffstat (limited to 'arch/s390/kvm/sigp.c')
-rw-r--r--	arch/s390/kvm/sigp.c	157
1 file changed, 66 insertions(+), 91 deletions(-)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 87c2b3a3bd3e..26caeb530a78 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -23,29 +23,30 @@
 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 			u64 *reg)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu = NULL;
+	int cpuflags;
 	int rc;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	if (fi->local_int[cpu_addr] == NULL)
-		rc = SIGP_CC_NOT_OPERATIONAL;
-	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		  & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
+
+	cpuflags = atomic_read(li->cpuflags);
+	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	else {
 		*reg &= 0xffffffff00000000UL;
-		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		    & CPUSTAT_ECALL_PEND)
+		if (cpuflags & CPUSTAT_ECALL_PEND)
 			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
-		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		    & CPUSTAT_STOPPED)
+		if (cpuflags & CPUSTAT_STOPPED)
 			*reg |= SIGP_STATUS_STOPPED;
 		rc = SIGP_CC_STATUS_STORED;
 	}
-	spin_unlock(&fi->lock);
 
 	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
 	return rc;
@@ -53,12 +54,13 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_interrupt_info *inti;
-	int rc;
+	struct kvm_vcpu *dst_vcpu = NULL;
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
+	if (cpu_addr < KVM_MAX_VCPUS)
+		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -68,13 +70,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	inti->type = KVM_S390_INT_EMERGENCY;
 	inti->emerg.code = vcpu->vcpu_id;
 
-	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
-	if (li == NULL) {
-		rc = SIGP_CC_NOT_OPERATIONAL;
-		kfree(inti);
-		goto unlock;
-	}
+	li = &dst_vcpu->arch.local_int;
 	spin_lock_bh(&li->lock);
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
@@ -82,11 +78,9 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	if (waitqueue_active(li->wq))
 		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
-	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
-unlock:
-	spin_unlock(&fi->lock);
-	return rc;
+
+	return SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
@@ -122,12 +116,13 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_interrupt_info *inti;
-	int rc;
+	struct kvm_vcpu *dst_vcpu = NULL;
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
+	if (cpu_addr < KVM_MAX_VCPUS)
+		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -137,13 +132,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	inti->type = KVM_S390_INT_EXTERNAL_CALL;
 	inti->extcall.code = vcpu->vcpu_id;
 
-	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
-	if (li == NULL) {
-		rc = SIGP_CC_NOT_OPERATIONAL;
-		kfree(inti);
-		goto unlock;
-	}
+	li = &dst_vcpu->arch.local_int;
 	spin_lock_bh(&li->lock);
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
@@ -151,11 +140,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	if (waitqueue_active(li->wq))
 		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
-	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
-unlock:
-	spin_unlock(&fi->lock);
-	return rc;
+
+	return SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
@@ -189,31 +176,26 @@ out:
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
-	if (li == NULL) {
-		rc = SIGP_CC_NOT_OPERATIONAL;
-		goto unlock;
-	}
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
 
 	rc = __inject_sigp_stop(li, action);
 
-unlock:
-	spin_unlock(&fi->lock);
 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 
 	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
 		/* If the CPU has already been stopped, we still have
 		 * to save the status when doing stop-and-store. This
 		 * has to be done after unlocking all spinlocks. */
-		struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 		rc = kvm_s390_store_status_unloaded(dst_vcpu,
 				KVM_S390_STORE_STATUS_NOADDR);
 	}
@@ -224,6 +206,8 @@ unlock:
 static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 {
 	int rc;
+	unsigned int i;
+	struct kvm_vcpu *v;
 
 	switch (parameter & 0xff) {
 	case 0:
@@ -231,6 +215,11 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 		break;
 	case 1:
 	case 2:
+		kvm_for_each_vcpu(i, v, vcpu->kvm) {
+			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+			kvm_clear_async_pf_completion_queue(v);
+		}
+
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 		break;
 	default:
@@ -242,12 +231,18 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 			     u64 *reg)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct kvm_s390_local_interrupt *li = NULL;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu = NULL;
 	struct kvm_s390_interrupt_info *inti;
 	int rc;
 	u8 tmp;
 
+	if (cpu_addr < KVM_MAX_VCPUS)
+		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
+
 	/* make sure that the new value is valid memory */
 	address = address & 0x7fffe000u;
 	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
@@ -261,18 +256,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	if (!inti)
 		return SIGP_CC_BUSY;
 
-	spin_lock(&fi->lock);
-	if (cpu_addr < KVM_MAX_VCPUS)
-		li = fi->local_int[cpu_addr];
-
-	if (li == NULL) {
-		*reg &= 0xffffffff00000000UL;
-		*reg |= SIGP_STATUS_INCORRECT_STATE;
-		rc = SIGP_CC_STATUS_STORED;
-		kfree(inti);
-		goto out_fi;
-	}
-
 	spin_lock_bh(&li->lock);
 	/* cpu must be in stopped state */
 	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
@@ -295,8 +278,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
 out_li:
 	spin_unlock_bh(&li->lock);
-out_fi:
-	spin_unlock(&fi->lock);
 	return rc;
 }
 
@@ -334,28 +315,26 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
 static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 				u64 *reg)
 {
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	if (fi->local_int[cpu_addr] == NULL)
-		rc = SIGP_CC_NOT_OPERATIONAL;
-	else {
-		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		    & CPUSTAT_RUNNING) {
-			/* running */
-			rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-		} else {
-			/* not running */
-			*reg &= 0xffffffff00000000UL;
-			*reg |= SIGP_STATUS_NOT_RUNNING;
-			rc = SIGP_CC_STATUS_STORED;
-		}
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
+	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+		/* running */
+		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+	} else {
+		/* not running */
+		*reg &= 0xffffffff00000000UL;
+		*reg |= SIGP_STATUS_NOT_RUNNING;
+		rc = SIGP_CC_STATUS_STORED;
 	}
-	spin_unlock(&fi->lock);
 
 	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
 		   rc);
@@ -366,26 +345,22 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 /* Test whether the destination CPU is available and not busy */
 static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
 	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+	struct kvm_vcpu *dst_vcpu = NULL;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
-	if (li == NULL) {
-		rc = SIGP_CC_NOT_OPERATIONAL;
-		goto out;
-	}
-
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
 	spin_lock_bh(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP)
 		rc = SIGP_CC_BUSY;
 	spin_unlock_bh(&li->lock);
-out:
-	spin_unlock(&fi->lock);
+
 	return rc;
 }
 