about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChristian Borntraeger <borntraeger@de.ibm.com>2014-08-04 10:54:22 -0400
committerChristian Borntraeger <borntraeger@de.ibm.com>2014-08-25 08:35:30 -0400
commitc3950b66b9ceff1614db870d2d5a9bd47531a712 (patch)
tree4baaf4f84fe1c43b066a1e542ef1f33f878a6c3d
parentfbfa304963fa8bf990dac1d05a77800d1e123b66 (diff)
KVM: s390: no special machine check delivery
The load PSW handler does not have to inject pending machine checks. This can wait until the CPU runs the generic interrupt injection code.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
-rw-r--r-- arch/s390/kvm/interrupt.c | 56
-rw-r--r-- arch/s390/kvm/kvm-s390.h  |  1
-rw-r--r-- arch/s390/kvm/priv.c      |  9
3 files changed, 0 insertions, 66 deletions
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 71bf7e749cf7..34d741edb50a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -721,62 +721,6 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
721 } 721 }
722} 722}
723 723
724void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
725{
726 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
727 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
728 struct kvm_s390_interrupt_info *n, *inti = NULL;
729 int deliver;
730
731 __reset_intercept_indicators(vcpu);
732 if (atomic_read(&li->active)) {
733 do {
734 deliver = 0;
735 spin_lock(&li->lock);
736 list_for_each_entry_safe(inti, n, &li->list, list) {
737 if ((inti->type == KVM_S390_MCHK) &&
738 __interrupt_is_deliverable(vcpu, inti)) {
739 list_del(&inti->list);
740 deliver = 1;
741 break;
742 }
743 __set_intercept_indicator(vcpu, inti);
744 }
745 if (list_empty(&li->list))
746 atomic_set(&li->active, 0);
747 spin_unlock(&li->lock);
748 if (deliver) {
749 __do_deliver_interrupt(vcpu, inti);
750 kfree(inti);
751 }
752 } while (deliver);
753 }
754
755 if (atomic_read(&fi->active)) {
756 do {
757 deliver = 0;
758 spin_lock(&fi->lock);
759 list_for_each_entry_safe(inti, n, &fi->list, list) {
760 if ((inti->type == KVM_S390_MCHK) &&
761 __interrupt_is_deliverable(vcpu, inti)) {
762 list_del(&inti->list);
763 fi->irq_count--;
764 deliver = 1;
765 break;
766 }
767 __set_intercept_indicator(vcpu, inti);
768 }
769 if (list_empty(&fi->list))
770 atomic_set(&fi->active, 0);
771 spin_unlock(&fi->lock);
772 if (deliver) {
773 __do_deliver_interrupt(vcpu, inti);
774 kfree(inti);
775 }
776 } while (deliver);
777 }
778}
779
780int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) 724int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
781{ 725{
782 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 726 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 3862fa2cefe0..894fa4653dfb 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -139,7 +139,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
139void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu); 139void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
140enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); 140enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
141void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); 141void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
142void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
143void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); 142void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
144void kvm_s390_clear_float_irqs(struct kvm *kvm); 143void kvm_s390_clear_float_irqs(struct kvm *kvm);
145int __must_check kvm_s390_inject_vm(struct kvm *kvm, 144int __must_check kvm_s390_inject_vm(struct kvm *kvm,
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index f89c1cd67751..d806f2cfde16 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -352,13 +352,6 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
352 return 0; 352 return 0;
353} 353}
354 354
355static void handle_new_psw(struct kvm_vcpu *vcpu)
356{
357 /* Check whether the new psw is enabled for machine checks. */
358 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
359 kvm_s390_deliver_pending_machine_checks(vcpu);
360}
361
362#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA) 355#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
363#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL 356#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
364#define PSW_ADDR_24 0x0000000000ffffffUL 357#define PSW_ADDR_24 0x0000000000ffffffUL
@@ -405,7 +398,6 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
405 gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE; 398 gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
406 if (!is_valid_psw(gpsw)) 399 if (!is_valid_psw(gpsw))
407 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 400 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
408 handle_new_psw(vcpu);
409 return 0; 401 return 0;
410} 402}
411 403
@@ -427,7 +419,6 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
427 vcpu->arch.sie_block->gpsw = new_psw; 419 vcpu->arch.sie_block->gpsw = new_psw;
428 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) 420 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
429 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 421 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
430 handle_new_psw(vcpu);
431 return 0; 422 return 0;
432} 423}
433 424