author	Christian Borntraeger <borntraeger@de.ibm.com>	2009-05-12 11:21:50 -0400
committer	Avi Kivity <avi@redhat.com>	2009-06-10 04:48:56 -0400
commit	b037a4f34ec51b6c8ccb352a04056c04a4bfc269 (patch)
tree	9ae7a04c170ecfca9ac2973c69383454c0aa8e60 /arch/s390/kvm
parent	ca8723023f25c9a70d76cbd6101f8fb4ffec2fa0 (diff)
KVM: s390: optimize float int lock: spin_lock_bh --> spin_lock
The floating interrupt lock is only taken in process context. We can
replace all spin_lock_bh with standard spin_lock calls.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Christian Ehrhardt <ehrhardt@de.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
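For readers less familiar with the kernel locking API, here is a minimal,
self-contained sketch of the distinction this patch exploits (the lock and
function names below are illustrative, not taken from the patch):
spin_lock_bh() disables softirq (bottom-half) processing on the local CPU
before acquiring the lock. That is only necessary when the same lock can
also be acquired from softirq context; otherwise a softirq could interrupt
the lock holder on its own CPU and spin on the lock forever. A lock that is
only ever taken in process context, as the floating interrupt lock is here,
can safely use the cheaper spin_lock().

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* illustrative lock, not from this patch */

/*
 * Case 1: the lock is also taken from softirq context (e.g. a tasklet).
 * Process-context users must disable bottom halves first, or a softirq
 * could interrupt the holder on the same CPU and spin forever.
 */
static void touch_shared_with_softirq(void)
{
	spin_lock_bh(&example_lock);
	/* ... critical section ... */
	spin_unlock_bh(&example_lock);
}

/*
 * Case 2: the lock is only ever taken in process context, as the
 * floating interrupt lock is after this patch.  Plain spin_lock()
 * suffices and skips the cost of disabling and re-enabling bottom
 * halves around every critical section.
 */
static void touch_process_context_only(void)
{
	spin_lock(&example_lock);
	/* ... critical section ... */
	spin_unlock(&example_lock);
}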
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/interrupt.c	20
-rw-r--r--	arch/s390/kvm/kvm-s390.c	4
-rw-r--r--	arch/s390/kvm/priv.c	4
-rw-r--r--	arch/s390/kvm/sigp.c	16
4 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a48830fa9c59..f04f5301b1b4 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -301,13 +301,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	}
 
 	if ((!rc) && atomic_read(&fi->active)) {
-		spin_lock_bh(&fi->lock);
+		spin_lock(&fi->lock);
 		list_for_each_entry(inti, &fi->list, list)
 			if (__interrupt_is_deliverable(vcpu, inti)) {
 				rc = 1;
 				break;
 			}
-		spin_unlock_bh(&fi->lock);
+		spin_unlock(&fi->lock);
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
@@ -368,7 +368,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
-	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+	spin_lock(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
@@ -377,18 +377,18 @@ no_timer:
 	       !signal_pending(current)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_bh(&vcpu->arch.local_int.lock);
-		spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+		spin_unlock(&vcpu->arch.local_int.float_int->lock);
 		vcpu_put(vcpu);
 		schedule();
 		vcpu_load(vcpu);
-		spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+		spin_lock(&vcpu->arch.local_int.float_int->lock);
 		spin_lock_bh(&vcpu->arch.local_int.lock);
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&vcpu->wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
-	spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
@@ -455,7 +455,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	if (atomic_read(&fi->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&fi->lock);
+			spin_lock(&fi->lock);
 			list_for_each_entry_safe(inti, n, &fi->list, list) {
 				if (__interrupt_is_deliverable(vcpu, inti)) {
 					list_del(&inti->list);
@@ -466,7 +466,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&fi->list))
 				atomic_set(&fi->active, 0);
-			spin_unlock_bh(&fi->lock);
+			spin_unlock(&fi->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -531,7 +531,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
 	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	list_add_tail(&inti->list, &fi->list);
 	atomic_set(&fi->active, 1);
 	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
@@ -548,7 +548,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	if (waitqueue_active(&li->wq))
 		wake_up_interruptible(&li->wq);
 	spin_unlock_bh(&li->lock);
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index dc3d06811fd8..36c654d2d64a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -318,11 +318,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-	spin_lock_bh(&kvm->arch.float_int.lock);
+	spin_lock(&kvm->arch.float_int.lock);
 	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
 	init_waitqueue_head(&vcpu->arch.local_int.wq);
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-	spin_unlock_bh(&kvm->arch.float_int.lock);
+	spin_unlock(&kvm->arch.float_int.lock);
 
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 4b88834b8dd8..93ecd06e1a74 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -204,11 +204,11 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	int cpus = 0;
 	int n;
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	for (n = 0; n < KVM_MAX_VCPUS; n++)
 		if (fi->local_int[n])
 			cpus++;
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 
 	/* deal with other level 3 hypervisors */
 	if (stsi(mem, 3, 2, 2) == -ENOSYS)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f27dbedf0866..36678835034d 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -52,7 +52,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return 3; /* not operational */
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	if (fi->local_int[cpu_addr] == NULL)
 		rc = 3; /* not operational */
 	else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
@@ -64,7 +64,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 		*reg |= SIGP_STAT_STOPPED;
 		rc = 1; /* status stored */
 	}
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 
 	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
 	return rc;
@@ -86,7 +86,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 
 	inti->type = KVM_S390_INT_EMERGENCY;
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	li = fi->local_int[cpu_addr];
 	if (li == NULL) {
 		rc = 3; /* not operational */
@@ -102,7 +102,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	spin_unlock_bh(&li->lock);
 	rc = 0; /* order accepted */
 unlock:
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
 	return rc;
 }
@@ -123,7 +123,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 
 	inti->type = KVM_S390_SIGP_STOP;
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	li = fi->local_int[cpu_addr];
 	if (li == NULL) {
 		rc = 3; /* not operational */
@@ -142,7 +142,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 	spin_unlock_bh(&li->lock);
 	rc = 0; /* order accepted */
 unlock:
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 	return rc;
 }
@@ -188,7 +188,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	if (!inti)
 		return 2; /* busy */
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	li = fi->local_int[cpu_addr];
 
 	if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
@@ -220,7 +220,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 out_li:
 	spin_unlock_bh(&li->lock);
 out_fi:
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	return rc;
 }
 
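The wait loop in kvm_s390_handle_wait() above shows the resulting mixed
usage most clearly: the floating interrupt lock drops to plain spin_lock(),
while the per-vcpu local_int.lock keeps its _bh variant, presumably because
it can still be taken outside plain process context. A condensed,
illustrative sketch of the choreography after this patch follows;
nothing_pending() is a hypothetical stand-in for the real list-emptiness,
stop, and signal checks.

	/* Condensed sketch of the kvm_s390_handle_wait() locking pattern. */
	spin_lock(&fi->lock);		/* outer: floating ints, process context only */
	spin_lock_bh(&li->lock);	/* inner: per-vcpu lock, still BH-safe */
	while (nothing_pending(vcpu)) {	/* hypothetical helper */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&li->lock);	/* drop inner, then outer, before sleeping */
		spin_unlock(&fi->lock);
		schedule();
		spin_lock(&fi->lock);		/* reacquire in the original order */
		spin_lock_bh(&li->lock);
	}
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);

Dropping both locks in inner-to-outer order before schedule() and
reacquiring them in the original outer-to-inner order keeps the lock
ordering consistent across the sleep, which is what makes the nesting safe.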