about summary refs log tree commit diff stats
path: root/arch/s390
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2014-05-16 04:23:53 -0400
committerChristian Borntraeger <borntraeger@de.ibm.com>2014-07-21 07:22:28 -0400
commit4ae3c0815fb63cbed1afcd5bacc7705c6d1b9fec (patch)
tree25212ba944cfbf2c012f5da378cfe67410846f67 /arch/s390
parent0759d0681cae279e77ebb4b76175e330360b01d9 (diff)
KVM: s390: remove _bh locking from local_int.lock
local_int.lock is not used in a bottom-half handler anymore, therefore we can turn it into an ordinary spin_lock at all occurrences.

Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--arch/s390/kvm/interrupt.c32
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/kvm/sigp.c20
3 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5fd11ce3dc3d..86575b4cdc1c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -544,13 +544,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
544 int rc = 0; 544 int rc = 0;
545 545
546 if (atomic_read(&li->active)) { 546 if (atomic_read(&li->active)) {
547 spin_lock_bh(&li->lock); 547 spin_lock(&li->lock);
548 list_for_each_entry(inti, &li->list, list) 548 list_for_each_entry(inti, &li->list, list)
549 if (__interrupt_is_deliverable(vcpu, inti)) { 549 if (__interrupt_is_deliverable(vcpu, inti)) {
550 rc = 1; 550 rc = 1;
551 break; 551 break;
552 } 552 }
553 spin_unlock_bh(&li->lock); 553 spin_unlock(&li->lock);
554 } 554 }
555 555
556 if ((!rc) && atomic_read(&fi->active)) { 556 if ((!rc) && atomic_read(&fi->active)) {
@@ -645,13 +645,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
645 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 645 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
646 struct kvm_s390_interrupt_info *n, *inti = NULL; 646 struct kvm_s390_interrupt_info *n, *inti = NULL;
647 647
648 spin_lock_bh(&li->lock); 648 spin_lock(&li->lock);
649 list_for_each_entry_safe(inti, n, &li->list, list) { 649 list_for_each_entry_safe(inti, n, &li->list, list) {
650 list_del(&inti->list); 650 list_del(&inti->list);
651 kfree(inti); 651 kfree(inti);
652 } 652 }
653 atomic_set(&li->active, 0); 653 atomic_set(&li->active, 0);
654 spin_unlock_bh(&li->lock); 654 spin_unlock(&li->lock);
655 655
656 /* clear pending external calls set by sigp interpretation facility */ 656 /* clear pending external calls set by sigp interpretation facility */
657 atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); 657 atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -670,7 +670,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
670 if (atomic_read(&li->active)) { 670 if (atomic_read(&li->active)) {
671 do { 671 do {
672 deliver = 0; 672 deliver = 0;
673 spin_lock_bh(&li->lock); 673 spin_lock(&li->lock);
674 list_for_each_entry_safe(inti, n, &li->list, list) { 674 list_for_each_entry_safe(inti, n, &li->list, list) {
675 if (__interrupt_is_deliverable(vcpu, inti)) { 675 if (__interrupt_is_deliverable(vcpu, inti)) {
676 list_del(&inti->list); 676 list_del(&inti->list);
@@ -681,7 +681,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
681 } 681 }
682 if (list_empty(&li->list)) 682 if (list_empty(&li->list))
683 atomic_set(&li->active, 0); 683 atomic_set(&li->active, 0);
684 spin_unlock_bh(&li->lock); 684 spin_unlock(&li->lock);
685 if (deliver) { 685 if (deliver) {
686 __do_deliver_interrupt(vcpu, inti); 686 __do_deliver_interrupt(vcpu, inti);
687 kfree(inti); 687 kfree(inti);
@@ -727,7 +727,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
727 if (atomic_read(&li->active)) { 727 if (atomic_read(&li->active)) {
728 do { 728 do {
729 deliver = 0; 729 deliver = 0;
730 spin_lock_bh(&li->lock); 730 spin_lock(&li->lock);
731 list_for_each_entry_safe(inti, n, &li->list, list) { 731 list_for_each_entry_safe(inti, n, &li->list, list) {
732 if ((inti->type == KVM_S390_MCHK) && 732 if ((inti->type == KVM_S390_MCHK) &&
733 __interrupt_is_deliverable(vcpu, inti)) { 733 __interrupt_is_deliverable(vcpu, inti)) {
@@ -739,7 +739,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
739 } 739 }
740 if (list_empty(&li->list)) 740 if (list_empty(&li->list))
741 atomic_set(&li->active, 0); 741 atomic_set(&li->active, 0);
742 spin_unlock_bh(&li->lock); 742 spin_unlock(&li->lock);
743 if (deliver) { 743 if (deliver) {
744 __do_deliver_interrupt(vcpu, inti); 744 __do_deliver_interrupt(vcpu, inti);
745 kfree(inti); 745 kfree(inti);
@@ -786,11 +786,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
786 786
787 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); 787 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
788 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); 788 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
789 spin_lock_bh(&li->lock); 789 spin_lock(&li->lock);
790 list_add(&inti->list, &li->list); 790 list_add(&inti->list, &li->list);
791 atomic_set(&li->active, 1); 791 atomic_set(&li->active, 1);
792 BUG_ON(waitqueue_active(li->wq)); 792 BUG_ON(waitqueue_active(li->wq));
793 spin_unlock_bh(&li->lock); 793 spin_unlock(&li->lock);
794 return 0; 794 return 0;
795} 795}
796 796
@@ -811,11 +811,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
811 811
812 inti->type = KVM_S390_PROGRAM_INT; 812 inti->type = KVM_S390_PROGRAM_INT;
813 memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); 813 memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
814 spin_lock_bh(&li->lock); 814 spin_lock(&li->lock);
815 list_add(&inti->list, &li->list); 815 list_add(&inti->list, &li->list);
816 atomic_set(&li->active, 1); 816 atomic_set(&li->active, 1);
817 BUG_ON(waitqueue_active(li->wq)); 817 BUG_ON(waitqueue_active(li->wq));
818 spin_unlock_bh(&li->lock); 818 spin_unlock(&li->lock);
819 return 0; 819 return 0;
820} 820}
821 821
@@ -903,12 +903,12 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
903 } 903 }
904 dst_vcpu = kvm_get_vcpu(kvm, sigcpu); 904 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
905 li = &dst_vcpu->arch.local_int; 905 li = &dst_vcpu->arch.local_int;
906 spin_lock_bh(&li->lock); 906 spin_lock(&li->lock);
907 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 907 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
908 if (waitqueue_active(li->wq)) 908 if (waitqueue_active(li->wq))
909 wake_up_interruptible(li->wq); 909 wake_up_interruptible(li->wq);
910 kvm_get_vcpu(kvm, sigcpu)->preempted = true; 910 kvm_get_vcpu(kvm, sigcpu)->preempted = true;
911 spin_unlock_bh(&li->lock); 911 spin_unlock(&li->lock);
912unlock_fi: 912unlock_fi:
913 spin_unlock(&fi->lock); 913 spin_unlock(&fi->lock);
914 mutex_unlock(&kvm->lock); 914 mutex_unlock(&kvm->lock);
@@ -1050,7 +1050,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
1050 1050
1051 mutex_lock(&vcpu->kvm->lock); 1051 mutex_lock(&vcpu->kvm->lock);
1052 li = &vcpu->arch.local_int; 1052 li = &vcpu->arch.local_int;
1053 spin_lock_bh(&li->lock); 1053 spin_lock(&li->lock);
1054 if (inti->type == KVM_S390_PROGRAM_INT) 1054 if (inti->type == KVM_S390_PROGRAM_INT)
1055 list_add(&inti->list, &li->list); 1055 list_add(&inti->list, &li->list);
1056 else 1056 else
@@ -1062,7 +1062,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
1062 if (waitqueue_active(&vcpu->wq)) 1062 if (waitqueue_active(&vcpu->wq))
1063 wake_up_interruptible(&vcpu->wq); 1063 wake_up_interruptible(&vcpu->wq);
1064 vcpu->preempted = true; 1064 vcpu->preempted = true;
1065 spin_unlock_bh(&li->lock); 1065 spin_unlock(&li->lock);
1066 mutex_unlock(&vcpu->kvm->lock); 1066 mutex_unlock(&vcpu->kvm->lock);
1067 return 0; 1067 return 0;
1068} 1068}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ecb135702313..a7bda180fe65 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1522,13 +1522,13 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1522 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 1522 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1523 1523
1524 /* Need to lock access to action_bits to avoid a SIGP race condition */ 1524 /* Need to lock access to action_bits to avoid a SIGP race condition */
1525 spin_lock_bh(&vcpu->arch.local_int.lock); 1525 spin_lock(&vcpu->arch.local_int.lock);
1526 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 1526 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1527 1527
1528 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 1528 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
1529 vcpu->arch.local_int.action_bits &= 1529 vcpu->arch.local_int.action_bits &=
1530 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP); 1530 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
1531 spin_unlock_bh(&vcpu->arch.local_int.lock); 1531 spin_unlock(&vcpu->arch.local_int.lock);
1532 1532
1533 __disable_ibs_on_vcpu(vcpu); 1533 __disable_ibs_on_vcpu(vcpu);
1534 1534
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index fd7fb5c5ef5d..946992f7bb25 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -135,7 +135,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
135 return -ENOMEM; 135 return -ENOMEM;
136 inti->type = KVM_S390_SIGP_STOP; 136 inti->type = KVM_S390_SIGP_STOP;
137 137
138 spin_lock_bh(&li->lock); 138 spin_lock(&li->lock);
139 if (li->action_bits & ACTION_STOP_ON_STOP) { 139 if (li->action_bits & ACTION_STOP_ON_STOP) {
140 /* another SIGP STOP is pending */ 140 /* another SIGP STOP is pending */
141 rc = SIGP_CC_BUSY; 141 rc = SIGP_CC_BUSY;
@@ -154,7 +154,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
154 if (waitqueue_active(li->wq)) 154 if (waitqueue_active(li->wq))
155 wake_up_interruptible(li->wq); 155 wake_up_interruptible(li->wq);
156out: 156out:
157 spin_unlock_bh(&li->lock); 157 spin_unlock(&li->lock);
158 158
159 return rc; 159 return rc;
160} 160}
@@ -243,7 +243,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
243 if (!inti) 243 if (!inti)
244 return SIGP_CC_BUSY; 244 return SIGP_CC_BUSY;
245 245
246 spin_lock_bh(&li->lock); 246 spin_lock(&li->lock);
247 /* cpu must be in stopped state */ 247 /* cpu must be in stopped state */
248 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { 248 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
249 *reg &= 0xffffffff00000000UL; 249 *reg &= 0xffffffff00000000UL;
@@ -264,7 +264,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
264 264
265 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); 265 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
266out_li: 266out_li:
267 spin_unlock_bh(&li->lock); 267 spin_unlock(&li->lock);
268 return rc; 268 return rc;
269} 269}
270 270
@@ -280,9 +280,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
280 if (!dst_vcpu) 280 if (!dst_vcpu)
281 return SIGP_CC_NOT_OPERATIONAL; 281 return SIGP_CC_NOT_OPERATIONAL;
282 282
283 spin_lock_bh(&dst_vcpu->arch.local_int.lock); 283 spin_lock(&dst_vcpu->arch.local_int.lock);
284 flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); 284 flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
285 spin_unlock_bh(&dst_vcpu->arch.local_int.lock); 285 spin_unlock(&dst_vcpu->arch.local_int.lock);
286 if (!(flags & CPUSTAT_STOPPED)) { 286 if (!(flags & CPUSTAT_STOPPED)) {
287 *reg &= 0xffffffff00000000UL; 287 *reg &= 0xffffffff00000000UL;
288 *reg |= SIGP_STATUS_INCORRECT_STATE; 288 *reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -343,10 +343,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
343 if (!dst_vcpu) 343 if (!dst_vcpu)
344 return SIGP_CC_NOT_OPERATIONAL; 344 return SIGP_CC_NOT_OPERATIONAL;
345 li = &dst_vcpu->arch.local_int; 345 li = &dst_vcpu->arch.local_int;
346 spin_lock_bh(&li->lock); 346 spin_lock(&li->lock);
347 if (li->action_bits & ACTION_STOP_ON_STOP) 347 if (li->action_bits & ACTION_STOP_ON_STOP)
348 rc = SIGP_CC_BUSY; 348 rc = SIGP_CC_BUSY;
349 spin_unlock_bh(&li->lock); 349 spin_unlock(&li->lock);
350 350
351 return rc; 351 return rc;
352} 352}
@@ -466,11 +466,11 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
466 dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); 466 dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
467 BUG_ON(dest_vcpu == NULL); 467 BUG_ON(dest_vcpu == NULL);
468 468
469 spin_lock_bh(&dest_vcpu->arch.local_int.lock); 469 spin_lock(&dest_vcpu->arch.local_int.lock);
470 if (waitqueue_active(&dest_vcpu->wq)) 470 if (waitqueue_active(&dest_vcpu->wq))
471 wake_up_interruptible(&dest_vcpu->wq); 471 wake_up_interruptible(&dest_vcpu->wq);
472 dest_vcpu->preempted = true; 472 dest_vcpu->preempted = true;
473 spin_unlock_bh(&dest_vcpu->arch.local_int.lock); 473 spin_unlock(&dest_vcpu->arch.local_int.lock);
474 474
475 kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); 475 kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
476 return 0; 476 return 0;