author | Christian Borntraeger <borntraeger@de.ibm.com> | 2009-05-12 11:21:50 -0400
---|---|---
committer | Avi Kivity <avi@redhat.com> | 2009-06-10 04:48:56 -0400
commit | b037a4f34ec51b6c8ccb352a04056c04a4bfc269 (patch) |
tree | 9ae7a04c170ecfca9ac2973c69383454c0aa8e60 /arch/s390/kvm/interrupt.c |
parent | ca8723023f25c9a70d76cbd6101f8fb4ffec2fa0 (diff) |
KVM: s390: optimize float int lock: spin_lock_bh --> spin_lock
The floating interrupt lock is only taken in process context. We can
replace all spin_lock_bh with standard spin_lock calls.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Christian Ehrhardt <ehrhardt@de.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
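
For context: `spin_lock_bh()` takes the lock and additionally disables bottom-half (softirq) processing on the local CPU, which is only needed when the same lock can also be acquired from softirq context. Because the floating interrupt lock is only ever taken in process context, a plain `spin_lock()` is sufficient and slightly cheaper. Below is a minimal sketch of the before/after pattern; the `demo_*` names are illustrative and not taken from the patch:

```c
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_float_int {
	spinlock_t lock;
	struct list_head list;
};

/* Before: spin_lock_bh() also disables softirqs on the local CPU.
 * That is required only if the lock may also be taken from softirq context. */
static void demo_enqueue_bh(struct demo_float_int *fi, struct list_head *item)
{
	spin_lock_bh(&fi->lock);
	list_add_tail(item, &fi->list);
	spin_unlock_bh(&fi->lock);
}

/* After: the lock is only taken from process context, so a plain
 * spin_lock() is enough; bottom halves need not be blocked while holding it. */
static void demo_enqueue(struct demo_float_int *fi, struct list_head *item)
{
	spin_lock(&fi->lock);
	list_add_tail(item, &fi->list);
	spin_unlock(&fi->lock);
}
```

In the patch itself this substitution is applied to the floating interrupt lock (`fi->lock` / `float_int->lock`) at every call site shown in the diff below; the per-vcpu `local_int.lock` keeps its `spin_lock_bh()` usage.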
Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r-- | arch/s390/kvm/interrupt.c | 20
1 file changed, 10 insertions, 10 deletions
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a48830fa9c59..f04f5301b1b4 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -301,13 +301,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	}
 
 	if ((!rc) && atomic_read(&fi->active)) {
-		spin_lock_bh(&fi->lock);
+		spin_lock(&fi->lock);
 		list_for_each_entry(inti, &fi->list, list)
 			if (__interrupt_is_deliverable(vcpu, inti)) {
 				rc = 1;
 				break;
 			}
-		spin_unlock_bh(&fi->lock);
+		spin_unlock(&fi->lock);
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
@@ -368,7 +368,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
-	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+	spin_lock(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
@@ -377,18 +377,18 @@ no_timer:
 		!signal_pending(current)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_bh(&vcpu->arch.local_int.lock);
-		spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+		spin_unlock(&vcpu->arch.local_int.float_int->lock);
 		vcpu_put(vcpu);
 		schedule();
 		vcpu_load(vcpu);
-		spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+		spin_lock(&vcpu->arch.local_int.float_int->lock);
 		spin_lock_bh(&vcpu->arch.local_int.lock);
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&vcpu->wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
-	spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
@@ -455,7 +455,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	if (atomic_read(&fi->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&fi->lock);
+			spin_lock(&fi->lock);
 			list_for_each_entry_safe(inti, n, &fi->list, list) {
 				if (__interrupt_is_deliverable(vcpu, inti)) {
 					list_del(&inti->list);
@@ -466,7 +466,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&fi->list))
 				atomic_set(&fi->active, 0);
-			spin_unlock_bh(&fi->lock);
+			spin_unlock(&fi->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -531,7 +531,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
 	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	list_add_tail(&inti->list, &fi->list);
 	atomic_set(&fi->active, 1);
 	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
@@ -548,7 +548,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	if (waitqueue_active(&li->wq))
 		wake_up_interruptible(&li->wq);
 	spin_unlock_bh(&li->lock);
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }