author    Jens Freimann <jfrei@linux.vnet.ibm.com>  2014-02-25 09:36:45 -0500
committer Christian Borntraeger <borntraeger@de.ibm.com>  2014-03-04 04:41:03 -0500
commit    1ee0bc559dc34fe36a29494faf7b7c91533bd31c
tree      36e5f09fef31f25b5d3efe8afc45336ee677b368
parent    13b191ae4afc0c29a5cd768f521ede5c72a608cb
KVM: s390: get rid of local_int array
We can use kvm_get_vcpu() now and don't need the local_int array in the
floating_int struct anymore. This also means we don't have to hold the
float_int.lock in some places.

Signed-off-by: Jens Freimann <jfrei@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
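The pattern the whole patch applies: every lock-protected lookup of
fi->local_int[cpu_addr] becomes a kvm_get_vcpu() call plus the address of the
local_int embedded in the vcpu. A condensed before/after sketch of the idiom
(error paths elided; see the hunks below for the real call sites):

	/* before: per-VM pointer array, guarded by the floating-interrupt lock */
	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];	/* NULL if no such vcpu exists */
	/* ... use li ... */
	spin_unlock(&fi->lock);

	/* after: resolve the vcpu, then use its embedded local_int directly */
	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;	/* no fi->lock needed */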
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/interrupt.c |   6
-rw-r--r--  arch/s390/kvm/kvm-s390.c  |   5
-rw-r--r--  arch/s390/kvm/sigp.c      | 124
3 files changed, 56 insertions(+), 79 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fff070bd0159..1d0f9d532c0b 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -692,6 +692,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_float_interrupt *fi;
 	struct kvm_s390_interrupt_info *iter;
+	struct kvm_vcpu *dst_vcpu = NULL;
 	int sigcpu;
 	int rc = 0;
 
@@ -726,9 +727,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 			sigcpu = fi->next_rr_cpu++;
 			if (sigcpu == KVM_MAX_VCPUS)
 				sigcpu = fi->next_rr_cpu = 0;
-		} while (fi->local_int[sigcpu] == NULL);
+		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
 	}
-	li = fi->local_int[sigcpu];
+	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+	li = &dst_vcpu->arch.local_int;
 	spin_lock_bh(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	if (waitqueue_active(li->wq))
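For context, kvm_get_vcpu() is the generic accessor from
include/linux/kvm_host.h; at the time of this patch it was roughly:

	static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
	{
		smp_rmb();	/* pairs with the write barrier at vcpu creation */
		return kvm->vcpus[i];
	}

Slots in kvm->vcpus are populated at vcpu creation and stay valid for the
lifetime of the VM, which is why the round-robin loop above can probe them
without taking float_int.lock.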
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index a3e4c07ec3a5..9136f8d40850 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -460,11 +460,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-	spin_lock(&kvm->arch.float_int.lock);
-	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
 	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-	spin_unlock(&kvm->arch.float_int.lock);
 
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
@@ -952,7 +949,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
-	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
+	BUG_ON(kvm_get_vcpu(vcpu->kvm, vcpu->vcpu_id) == NULL);
 
 	switch (kvm_run->exit_reason) {
 	case KVM_EXIT_S390_SIEIC:
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 466eefa18708..3fe44c441609 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -23,29 +23,30 @@
 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 			u64 *reg)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu = NULL;
+	int cpuflags;
 	int rc;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	if (fi->local_int[cpu_addr] == NULL)
-		rc = SIGP_CC_NOT_OPERATIONAL;
-	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
+
+	cpuflags = atomic_read(li->cpuflags);
+	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	else {
 		*reg &= 0xffffffff00000000UL;
-		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		    & CPUSTAT_ECALL_PEND)
+		if (cpuflags & CPUSTAT_ECALL_PEND)
 			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
-		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		    & CPUSTAT_STOPPED)
+		if (cpuflags & CPUSTAT_STOPPED)
 			*reg |= SIGP_STATUS_STOPPED;
 		rc = SIGP_CC_STATUS_STORED;
 	}
-	spin_unlock(&fi->lock);
 
 	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
 	return rc;
@@ -53,10 +54,9 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_interrupt_info *inti;
-	int rc;
+	struct kvm_vcpu *dst_vcpu = NULL;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
@@ -68,13 +68,10 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	inti->type = KVM_S390_INT_EMERGENCY;
 	inti->emerg.code = vcpu->vcpu_id;
 
-	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
-	if (li == NULL) {
-		rc = SIGP_CC_NOT_OPERATIONAL;
-		kfree(inti);
-		goto unlock;
-	}
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
 	spin_lock_bh(&li->lock);
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
@@ -82,11 +79,9 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	if (waitqueue_active(li->wq))
 		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
-	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
-unlock:
-	spin_unlock(&fi->lock);
-	return rc;
+
+	return SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
@@ -122,10 +117,9 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_interrupt_info *inti;
-	int rc;
+	struct kvm_vcpu *dst_vcpu = NULL;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
@@ -137,13 +131,10 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	inti->type = KVM_S390_INT_EXTERNAL_CALL;
 	inti->extcall.code = vcpu->vcpu_id;
 
-	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
-	if (li == NULL) {
-		rc = SIGP_CC_NOT_OPERATIONAL;
-		kfree(inti);
-		goto unlock;
-	}
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
 	spin_lock_bh(&li->lock);
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
@@ -151,11 +142,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	if (waitqueue_active(li->wq))
 		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
-	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
-unlock:
-	spin_unlock(&fi->lock);
-	return rc;
+
+	return SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
@@ -189,31 +178,26 @@ out:
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
-	if (li == NULL) {
-		rc = SIGP_CC_NOT_OPERATIONAL;
-		goto unlock;
-	}
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
 
 	rc = __inject_sigp_stop(li, action);
 
-unlock:
-	spin_unlock(&fi->lock);
 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 
 	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
 		/* If the CPU has already been stopped, we still have
 		 * to save the status when doing stop-and-store. This
 		 * has to be done after unlocking all spinlocks. */
-		struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 		rc = kvm_s390_store_status_unloaded(dst_vcpu,
 						KVM_S390_STORE_STATUS_NOADDR);
 	}
@@ -333,28 +317,26 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
 static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 				u64 *reg)
 {
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	if (fi->local_int[cpu_addr] == NULL)
-		rc = SIGP_CC_NOT_OPERATIONAL;
-	else {
-		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		    & CPUSTAT_RUNNING) {
-			/* running */
-			rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-		} else {
-			/* not running */
-			*reg &= 0xffffffff00000000UL;
-			*reg |= SIGP_STATUS_NOT_RUNNING;
-			rc = SIGP_CC_STATUS_STORED;
-		}
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
+	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+		/* running */
+		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+	} else {
+		/* not running */
+		*reg &= 0xffffffff00000000UL;
+		*reg |= SIGP_STATUS_NOT_RUNNING;
+		rc = SIGP_CC_STATUS_STORED;
 	}
-	spin_unlock(&fi->lock);
 
 	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
 		   rc);
@@ -365,26 +347,22 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 /* Test whether the destination CPU is available and not busy */
 static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
 	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+	struct kvm_vcpu *dst_vcpu = NULL;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	li = fi->local_int[cpu_addr];
-	if (li == NULL) {
-		rc = SIGP_CC_NOT_OPERATIONAL;
-		goto out;
-	}
-
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
 	spin_lock_bh(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP)
 		rc = SIGP_CC_BUSY;
 	spin_unlock_bh(&li->lock);
-out:
-	spin_unlock(&fi->lock);
+
 	return rc;
 }
 
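The resolve-then-use prologue now repeats in __sigp_sense, __sigp_emergency,
__sigp_external_call, __sigp_stop and sigp_check_callable. Purely as an
illustration of the shared shape (this helper is not part of the patch), it
could be factored as:

	/* hypothetical helper: map a SIGP CPU address to the destination
	 * vcpu's local-interrupt struct, or NULL if the address names no vcpu
	 */
	static struct kvm_s390_local_interrupt *sigp_get_dst_li(struct kvm_vcpu *vcpu,
								u16 cpu_addr)
	{
		struct kvm_vcpu *dst_vcpu;

		if (cpu_addr >= KVM_MAX_VCPUS)
			return NULL;
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		if (!dst_vcpu)
			return NULL;
		return &dst_vcpu->arch.local_int;
	}

__sigp_stop still needs dst_vcpu itself for the stop-and-store path, so keeping
the lookup open-coded at each call site is a reasonable choice here.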