author	Paolo Bonzini <pbonzini@redhat.com>	2014-07-22 04:22:53 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-07-22 04:22:53 -0400
commit	c756ad036fc8ef432438e89aab63cc723eda39b4 (patch)
tree	16bf4d2d280f624ff2ce99f5d02aeb7f002863f6
parent	6f43ed01e87c8a8dbd8c826eaf0f714c1342c039 (diff)
parent	e59d120f96687a606db0513c427f10e30a427cc4 (diff)
Merge tag 'kvm-s390-20140721' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next
Bugfixes
--------
- add IPTE to trace event decoder
- document and advertise KVM_CAP_S390_IRQCHIP

Cleanups
--------
- Reuse kvm_vcpu_block for s390
- Get rid of tasklet for wakeup processing
-rw-r--r--	Documentation/virtual/kvm/api.txt	27
-rw-r--r--	arch/s390/include/asm/kvm_host.h	2
-rw-r--r--	arch/s390/include/uapi/asm/sie.h	1
-rw-r--r--	arch/s390/kvm/interrupt.c	100
-rw-r--r--	arch/s390/kvm/kvm-s390.c	18
-rw-r--r--	arch/s390/kvm/kvm-s390.h	2
-rw-r--r--	arch/s390/kvm/sigp.c	36
7 files changed, 82 insertions(+), 104 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 587972ca12c5..68cda1fc3d52 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2934,15 +2934,18 @@ The fields in each entry are defined as follows:
 6. Capabilities that can be enabled
 -----------------------------------
 
-There are certain capabilities that change the behavior of the virtual CPU when
-enabled. To enable them, please see section 4.37. Below you can find a list of
-capabilities and what their effect on the vCPU is when enabling them.
+There are certain capabilities that change the behavior of the virtual CPU or
+the virtual machine when enabled. To enable them, please see section 4.37.
+Below you can find a list of capabilities and what their effect on the vCPU or
+the virtual machine is when enabling them.
 
 The following information is provided along with the description:
 
  Architectures: which instruction set architectures provide this ioctl.
      x86 includes both i386 and x86_64.
 
+ Target: whether this is a per-vcpu or per-vm capability.
+
  Parameters: what parameters are accepted by the capability.
 
  Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
@@ -2952,6 +2955,7 @@ The following information is provided along with the description:
 6.1 KVM_CAP_PPC_OSI
 
 Architectures: ppc
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -2966,6 +2970,7 @@ When this capability is enabled, KVM_EXIT_OSI can occur.
 6.2 KVM_CAP_PPC_PAPR
 
 Architectures: ppc
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -2985,6 +2990,7 @@ When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
 6.3 KVM_CAP_SW_TLB
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] is the address of a struct kvm_config_tlb
 Returns: 0 on success; -1 on error
 
@@ -3027,6 +3033,7 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
 6.4 KVM_CAP_S390_CSS_SUPPORT
 
 Architectures: s390
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -3038,9 +3045,13 @@ handled in-kernel, while the other I/O instructions are passed to userspace.
 When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
 SUBCHANNEL intercepts.
 
+Note that even though this capability is enabled per-vcpu, the complete
+virtual machine is affected.
+
 6.5 KVM_CAP_PPC_EPR
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] defines whether the proxy facility is active
 Returns: 0 on success; -1 on error
 
@@ -3066,7 +3077,17 @@ This capability connects the vcpu to an in-kernel MPIC device.
 6.7 KVM_CAP_IRQ_XICS
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] is the XICS device fd
             args[1] is the XICS CPU number (server ID) for this vcpu
 
 This capability connects the vcpu to an in-kernel XICS device.
+
+6.8 KVM_CAP_S390_IRQCHIP
+
+Architectures: s390
+Target: vm
+Parameters: none
+
+This capability enables the in-kernel irqchip for s390. Please refer to
+"4.24 KVM_CREATE_IRQCHIP" for details.
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index c2ba0208a0e1..773bef7614d8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt {
 	struct list_head list;
 	atomic_t active;
 	struct kvm_s390_float_interrupt *float_int;
-	int timer_due; /* event indicator for waitqueue below */
 	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
 	unsigned int action_bits;
@@ -367,7 +366,6 @@ struct kvm_vcpu_arch {
 	s390_fp_regs guest_fpregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer ckc_timer;
-	struct tasklet_struct tasklet;
 	struct kvm_s390_pgm_info pgm;
 	union {
 		struct cpuid cpu_id;
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index 5d9cc19462c4..d4096fdfc6ab 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -108,6 +108,7 @@
 	exit_code_ipa0(0xB2, 0x17, "STETR"),	\
 	exit_code_ipa0(0xB2, 0x18, "PC"),	\
 	exit_code_ipa0(0xB2, 0x20, "SERVC"),	\
+	exit_code_ipa0(0xB2, 0x21, "IPTE"),	\
 	exit_code_ipa0(0xB2, 0x28, "PT"),	\
 	exit_code_ipa0(0xB2, 0x29, "ISKE"),	\
 	exit_code_ipa0(0xB2, 0x2a, "RRBE"),	\
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 90c8de22a2a0..1be3d8da49e9 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -544,13 +544,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	int rc = 0;
 
 	if (atomic_read(&li->active)) {
-		spin_lock_bh(&li->lock);
+		spin_lock(&li->lock);
 		list_for_each_entry(inti, &li->list, list)
 			if (__interrupt_is_deliverable(vcpu, inti)) {
 				rc = 1;
 				break;
 			}
-		spin_unlock_bh(&li->lock);
+		spin_unlock(&li->lock);
 	}
 
 	if ((!rc) && atomic_read(&fi->active)) {
@@ -585,88 +585,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 {
 	u64 now, sltime;
-	DECLARE_WAITQUEUE(wait, current);
 
 	vcpu->stat.exit_wait_state++;
-	if (kvm_cpu_has_interrupt(vcpu))
-		return 0;
 
-	__set_cpu_idle(vcpu);
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-	vcpu->arch.local_int.timer_due = 0;
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	/* fast path */
+	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+		return 0;
 
 	if (psw_interrupts_disabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
-		__unset_cpu_idle(vcpu);
 		return -EOPNOTSUPP; /* disabled wait */
 	}
 
+	__set_cpu_idle(vcpu);
 	if (!ckc_interrupts_enabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
 		goto no_timer;
 	}
 
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-	if (vcpu->arch.sie_block->ckc < now) {
-		__unset_cpu_idle(vcpu);
-		return 0;
-	}
-
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-	spin_lock(&vcpu->arch.local_int.float_int->lock);
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-	add_wait_queue(&vcpu->wq, &wait);
-	while (list_empty(&vcpu->arch.local_int.list) &&
-		list_empty(&vcpu->arch.local_int.float_int->list) &&
-		(!vcpu->arch.local_int.timer_due) &&
-		!signal_pending(current) &&
-		!kvm_s390_si_ext_call_pending(vcpu)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		spin_unlock_bh(&vcpu->arch.local_int.lock);
-		spin_unlock(&vcpu->arch.local_int.float_int->lock);
-		schedule();
-		spin_lock(&vcpu->arch.local_int.float_int->lock);
-		spin_lock_bh(&vcpu->arch.local_int.lock);
-	}
+	kvm_vcpu_block(vcpu);
 	__unset_cpu_idle(vcpu);
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->wq, &wait);
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
-	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
 
-void kvm_s390_tasklet(unsigned long parm)
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
-
-	spin_lock(&vcpu->arch.local_int.lock);
-	vcpu->arch.local_int.timer_due = 1;
-	if (waitqueue_active(&vcpu->wq))
+	if (waitqueue_active(&vcpu->wq)) {
+		/*
+		 * The vcpu gave up the cpu voluntarily, mark it as a good
+		 * yield-candidate.
+		 */
+		vcpu->preempted = true;
 		wake_up_interruptible(&vcpu->wq);
-	spin_unlock(&vcpu->arch.local_int.lock);
+	}
 }
 
-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	vcpu->preempted = true;
-	tasklet_schedule(&vcpu->arch.tasklet);
+	kvm_s390_vcpu_wakeup(vcpu);
 
 	return HRTIMER_NORESTART;
 }
@@ -676,13 +644,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_interrupt_info *n, *inti = NULL;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_for_each_entry_safe(inti, n, &li->list, list) {
 		list_del(&inti->list);
 		kfree(inti);
 	}
 	atomic_set(&li->active, 0);
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
 	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -701,7 +669,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	if (atomic_read(&li->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
 			list_for_each_entry_safe(inti, n, &li->list, list) {
 				if (__interrupt_is_deliverable(vcpu, inti)) {
 					list_del(&inti->list);
@@ -712,7 +680,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&li->list))
 				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -758,7 +726,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
 	if (atomic_read(&li->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
 			list_for_each_entry_safe(inti, n, &li->list, list) {
 				if ((inti->type == KVM_S390_MCHK) &&
 				    __interrupt_is_deliverable(vcpu, inti)) {
@@ -770,7 +738,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&li->list))
 				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -817,11 +785,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 
 	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return 0;
 }
 
@@ -842,11 +810,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 
 	inti->type = KVM_S390_PROGRAM_INT;
 	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return 0;
 }
 
@@ -934,12 +902,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	}
 	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
 	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
+	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
 	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
@@ -1081,7 +1047,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 
 	mutex_lock(&vcpu->kvm->lock);
 	li = &vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (inti->type == KVM_S390_PROGRAM_INT)
 		list_add(&inti->list, &li->list);
 	else
@@ -1090,11 +1056,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	if (inti->type == KVM_S390_SIGP_STOP)
 		li->action_bits |= ACTION_STOP_ON_STOP;
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
-	vcpu->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
+	kvm_s390_vcpu_wakeup(vcpu);
 	return 0;
 }
 
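[Context note] The enabled-wait path above now delegates the actual sleeping to
the generic kvm_vcpu_block() in virt/kvm/kvm_main.c. For orientation, in this
kernel generation that helper has roughly the following shape (a sketch from
memory, not the verbatim source); note that kvm_arch_vcpu_runnable() causes
KVM_REQ_UNHALT to be set, which is why kvm-s390.c below gains a clear_bit()
for that request:

	void kvm_vcpu_block(struct kvm_vcpu *vcpu)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

			/* arch hook: s390 reports runnable when an interrupt
			 * is deliverable */
			if (kvm_arch_vcpu_runnable(vcpu)) {
				kvm_make_request(KVM_REQ_UNHALT, vcpu);
				break;
			}
			if (kvm_cpu_has_pending_timer(vcpu))
				break;
			if (signal_pending(current))
				break;

			schedule();
		}

		finish_wait(&vcpu->wq, &wait);
	}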
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index fdf88f7a539c..339b34a02fb8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -166,6 +166,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_ENABLE_CAP_VM:
+	case KVM_CAP_S390_IRQCHIP:
 	case KVM_CAP_VM_ATTRIBUTES:
 	case KVM_CAP_MP_STATE:
 		r = 1;
@@ -649,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		return rc;
 	}
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
-		     (unsigned long) vcpu);
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 	get_cpu_id(&vcpu->arch.cpu_id);
 	vcpu->arch.cpu_id.version = 0xff;
@@ -1068,6 +1067,9 @@ retry:
 		goto retry;
 	}
 
+	/* nothing to do, just clear the request */
+	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+
 	return 0;
 }
 
@@ -1475,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 
 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
 	/* Only one cpu at a time may enter/leave the STOPPED state. */
-	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_lock(&vcpu->kvm->arch.start_stop_lock);
 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
 	for (i = 0; i < online_vcpus; i++) {
@@ -1501,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 	 * Let's play safe and flush the VCPU at startup.
 	 */
 	vcpu->arch.sie_block->ihcpu = 0xffff;
-	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
 	return;
 }
 
@@ -1515,17 +1517,17 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 
 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
 	/* Only one cpu at a time may enter/leave the STOPPED state. */
-	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_lock(&vcpu->kvm->arch.start_stop_lock);
 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
 	/* Need to lock access to action_bits to avoid a SIGP race condition */
-	spin_lock_bh(&vcpu->arch.local_int.lock);
+	spin_lock(&vcpu->arch.local_int.lock);
 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	vcpu->arch.local_int.action_bits &=
 			~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	spin_unlock(&vcpu->arch.local_int.lock);
 
 	__disable_ibs_on_vcpu(vcpu);
 
@@ -1544,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 		__enable_ibs_on_vcpu(started_vcpu);
 	}
 
-	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
 	return;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 33a0e4bed2a5..3862fa2cefe0 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -136,8 +136,8 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
 }
 
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
-void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index fd7fb5c5ef5d..c6f1c2bc9753 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
+static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
 {
+	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
 	struct kvm_s390_interrupt_info *inti;
 	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
@@ -135,7 +136,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 		return -ENOMEM;
 	inti->type = KVM_S390_SIGP_STOP;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP) {
 		/* another SIGP STOP is pending */
 		rc = SIGP_CC_BUSY;
@@ -151,17 +152,15 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 	atomic_set(&li->active, 1);
 	li->action_bits |= action;
 	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
 out:
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	return rc;
 }
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 {
-	struct kvm_s390_local_interrupt *li;
 	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
@@ -171,9 +170,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
-	li = &dst_vcpu->arch.local_int;
 
-	rc = __inject_sigp_stop(li, action);
+	rc = __inject_sigp_stop(dst_vcpu, action);
 
 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 
@@ -243,7 +241,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	if (!inti)
 		return SIGP_CC_BUSY;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	/* cpu must be in stopped state */
 	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
@@ -258,13 +256,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
 	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
 out_li:
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return rc;
 }
 
@@ -280,9 +277,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+	spin_lock(&dst_vcpu->arch.local_int.lock);
 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+	spin_unlock(&dst_vcpu->arch.local_int.lock);
 	if (!(flags & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -343,10 +340,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP)
 		rc = SIGP_CC_BUSY;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	return rc;
 }
@@ -466,12 +463,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
 		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 		BUG_ON(dest_vcpu == NULL);
 
-		spin_lock_bh(&dest_vcpu->arch.local_int.lock);
-		if (waitqueue_active(&dest_vcpu->wq))
-			wake_up_interruptible(&dest_vcpu->wq);
-		dest_vcpu->preempted = true;
-		spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
-
+		kvm_s390_vcpu_wakeup(dest_vcpu);
 		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
 		return 0;
 	}