Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r--  arch/s390/kvm/interrupt.c | 100
1 file changed, 32 insertions(+), 68 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 90c8de22a2a0..1be3d8da49e9 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -544,13 +544,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	int rc = 0;
 
 	if (atomic_read(&li->active)) {
-		spin_lock_bh(&li->lock);
+		spin_lock(&li->lock);
 		list_for_each_entry(inti, &li->list, list)
 			if (__interrupt_is_deliverable(vcpu, inti)) {
 				rc = 1;
 				break;
 			}
-		spin_unlock_bh(&li->lock);
+		spin_unlock(&li->lock);
 	}
 
 	if ((!rc) && atomic_read(&fi->active)) {
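
Note on the locking change (it repeats in every hunk below that touches li->lock): the _bh suffix was only needed because the old kvm_s390_tasklet(), removed later in this diff, took the same lock from softirq context. Once no bottom-half path acquires li->lock anymore, the plain spin_lock()/spin_unlock() pair is enough. A generic illustration of the hazard the _bh variant guarded against follows; this is not code from the patch, and the demo_* names are made up:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Old-style tasklet callback (softirq context) that takes demo_lock. */
static void demo_tasklet_fn(unsigned long data)
{
	spin_lock(&demo_lock);
	/* ... */
	spin_unlock(&demo_lock);
}
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

static void demo_process_context(void)
{
	/* _bh keeps the tasklet from preempting us on this CPU and spinning
	 * forever on the lock we already hold.  With no softirq user left,
	 * plain spin_lock()/spin_unlock() would suffice, as in this patch. */
	spin_lock_bh(&demo_lock);
	/* ... */
	spin_unlock_bh(&demo_lock);
}
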
@@ -585,88 +585,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 {
 	u64 now, sltime;
-	DECLARE_WAITQUEUE(wait, current);
 
 	vcpu->stat.exit_wait_state++;
-	if (kvm_cpu_has_interrupt(vcpu))
-		return 0;
 
-	__set_cpu_idle(vcpu);
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-	vcpu->arch.local_int.timer_due = 0;
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	/* fast path */
+	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+		return 0;
 
 	if (psw_interrupts_disabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
-		__unset_cpu_idle(vcpu);
 		return -EOPNOTSUPP; /* disabled wait */
 	}
 
+	__set_cpu_idle(vcpu);
 	if (!ckc_interrupts_enabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
 		goto no_timer;
 	}
 
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-	if (vcpu->arch.sie_block->ckc < now) {
-		__unset_cpu_idle(vcpu);
-		return 0;
-	}
-
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-	spin_lock(&vcpu->arch.local_int.float_int->lock);
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-	add_wait_queue(&vcpu->wq, &wait);
-	while (list_empty(&vcpu->arch.local_int.list) &&
-		list_empty(&vcpu->arch.local_int.float_int->list) &&
-		(!vcpu->arch.local_int.timer_due) &&
-		!signal_pending(current) &&
-		!kvm_s390_si_ext_call_pending(vcpu)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		spin_unlock_bh(&vcpu->arch.local_int.lock);
-		spin_unlock(&vcpu->arch.local_int.float_int->lock);
-		schedule();
-		spin_lock(&vcpu->arch.local_int.float_int->lock);
-		spin_lock_bh(&vcpu->arch.local_int.lock);
-	}
+	kvm_vcpu_block(vcpu);
 	__unset_cpu_idle(vcpu);
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->wq, &wait);
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
-	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
 
-void kvm_s390_tasklet(unsigned long parm)
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
-
-	spin_lock(&vcpu->arch.local_int.lock);
-	vcpu->arch.local_int.timer_due = 1;
-	if (waitqueue_active(&vcpu->wq))
+	if (waitqueue_active(&vcpu->wq)) {
+		/*
+		 * The vcpu gave up the cpu voluntarily, mark it as a good
+		 * yield-candidate.
+		 */
+		vcpu->preempted = true;
 		wake_up_interruptible(&vcpu->wq);
-	spin_unlock(&vcpu->arch.local_int.lock);
+	}
 }
 
-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	vcpu->preempted = true;
-	tasklet_schedule(&vcpu->arch.tasklet);
+	kvm_s390_vcpu_wakeup(vcpu);
 
 	return HRTIMER_NORESTART;
 }
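
In this hunk the hand-rolled wait loop in kvm_s390_handle_wait() is replaced by the generic kvm_vcpu_block(), and the tasklet indirection for the clock-comparator hrtimer is dropped in favour of calling the new kvm_s390_vcpu_wakeup() helper directly from hardirq context. For context, the generic helper sleeps on the same vcpu->wq that kvm_s390_vcpu_wakeup() wakes. Roughly, as a paraphrased sketch of virt/kvm/kvm_main.c from around this kernel version (not part of this diff, details such as KVM_REQ_UNHALT omitted):

void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		/* kvm_arch_vcpu_runnable() reports pending deliverable
		 * interrupts; together with kvm_cpu_has_pending_timer()
		 * these are the same conditions the new fast path at the
		 * top of kvm_s390_handle_wait() checks. */
		if (kvm_arch_vcpu_runnable(vcpu) ||
		    kvm_cpu_has_pending_timer(vcpu) ||
		    signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
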
@@ -676,13 +644,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_interrupt_info *n, *inti = NULL;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_for_each_entry_safe(inti, n, &li->list, list) {
 		list_del(&inti->list);
 		kfree(inti);
 	}
 	atomic_set(&li->active, 0);
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
 	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -701,7 +669,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	if (atomic_read(&li->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
 			list_for_each_entry_safe(inti, n, &li->list, list) {
 				if (__interrupt_is_deliverable(vcpu, inti)) {
 					list_del(&inti->list);
@@ -712,7 +680,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&li->list))
 				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -758,7 +726,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
 	if (atomic_read(&li->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
 			list_for_each_entry_safe(inti, n, &li->list, list) {
 				if ((inti->type == KVM_S390_MCHK) &&
 				    __interrupt_is_deliverable(vcpu, inti)) {
@@ -770,7 +738,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&li->list))
 				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -817,11 +785,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 
 	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return 0;
 }
 
@@ -842,11 +810,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 
 	inti->type = KVM_S390_PROGRAM_INT;
 	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return 0;
 }
 
@@ -934,12 +902,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	}
 	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
 	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
+	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
 	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
@@ -1081,7 +1047,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 
 	mutex_lock(&vcpu->kvm->lock);
 	li = &vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (inti->type == KVM_S390_PROGRAM_INT)
 		list_add(&inti->list, &li->list);
 	else
@@ -1090,11 +1056,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	if (inti->type == KVM_S390_SIGP_STOP)
 		li->action_bits |= ACTION_STOP_ON_STOP;
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
-	vcpu->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
+	kvm_s390_vcpu_wakeup(vcpu);
 	return 0;
 }
 
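
Both injection sites touched above, __inject_vm() and kvm_s390_inject_vcpu(), now converge on the same sequence: queue the interrupt and raise CPUSTAT_EXT_INT while holding li->lock, drop the lock, and only then call kvm_s390_vcpu_wakeup(). A condensed, hypothetical helper showing that pattern follows; the example_* name is made up and the real hunks differ in how the interrupt is queued:

static void example_inject_and_wake(struct kvm_vcpu *vcpu,
				    struct kvm_s390_local_interrupt *li,
				    struct kvm_s390_interrupt_info *inti)
{
	spin_lock(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	spin_unlock(&li->lock);

	/* Wake the target vcpu after dropping the lock; the helper marks it
	 * as a good yield candidate (vcpu->preempted) and wakes vcpu->wq if
	 * it is sleeping in kvm_vcpu_block(). */
	kvm_s390_vcpu_wakeup(vcpu);
}

Doing the wakeup after unlocking means a woken vcpu does not immediately contend on li->lock, and it keeps the wake-up and yield-candidate logic in the single kvm_s390_vcpu_wakeup() helper introduced by this patch.
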