author	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-02 17:50:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-02 17:50:10 -0400
commit	7cbb39d4d4d530dff12f2ff06ed6c85c504ba91a (patch)
tree	82f721591d739eca99817def86ca5b6ebd682fe6 /arch/s390/kvm
parent	64056a94256e7a476de67fbe581dfe5515c56288 (diff)
parent	7227fc0666606b0df2c0d2966a7f4859b01bdf74 (diff)
Merge tag 'kvm-3.15-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
 "PPC and ARM do not have much going on this time. Most of the cool
  stuff, instead, is in s390 and (after a few releases) x86.

  ARM has some caching fixes and PPC has transactional memory support
  in guests. MIPS has some fixes, with more probably coming in 3.16 as
  QEMU will soon get support for MIPS KVM.

  For x86 there are optimizations for debug registers, which trigger on
  some Windows games, and other important fixes for Windows guests. We
  now expose to the guest Broadwell instruction set extensions and also
  Intel MPX. There's also a fix/workaround for OS X guests, nested
  virtualization features (preemption timer), and a couple kvmclock
  refinements.

  For s390, the main news is asynchronous page faults, together with
  improvements to IRQs (floating irqs and adapter irqs) that speed up
  virtio devices"

* tag 'kvm-3.15-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (96 commits)
  KVM: PPC: Book3S HV: Save/restore host PMU registers that are new in POWER8
  KVM: PPC: Book3S HV: Fix decrementer timeouts with non-zero TB offset
  KVM: PPC: Book3S HV: Don't use kvm_memslots() in real mode
  KVM: PPC: Book3S HV: Return ENODEV error rather than EIO
  KVM: PPC: Book3S: Trim top 4 bits of physical address in RTAS code
  KVM: PPC: Book3S HV: Add get/set_one_reg for new TM state
  KVM: PPC: Book3S HV: Add transactional memory support
  KVM: Specify byte order for KVM_EXIT_MMIO
  KVM: vmx: fix MPX detection
  KVM: PPC: Book3S HV: Fix KVM hang with CONFIG_KVM_XICS=n
  KVM: PPC: Book3S: Introduce hypervisor call H_GET_TCE
  KVM: PPC: Book3S HV: Fix incorrect userspace exit on ioeventfd write
  KVM: s390: clear local interrupts at cpu initial reset
  KVM: s390: Fix possible memory leak in SIGP functions
  KVM: s390: fix calculation of idle_mask array size
  KVM: s390: randomize sca address
  KVM: ioapic: reinject pending interrupts on KVM_SET_IRQCHIP
  KVM: Bump KVM_MAX_IRQ_ROUTES for s390
  KVM: s390: irq routing for adapter interrupts.
  KVM: s390: adapter interrupt sources
  ...
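The new s390 interfaces in this pull are all driven through generic KVM
ioctls. As a rough orientation (not part of the merge itself), a minimal
userspace sketch for turning on the in-kernel irqchip and creating the
floating interrupt controller (FLIC) could look like this; vm_fd is
assumed to come from KVM_CREATE_VM, and error handling is elided:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Sketch: enable KVM_CAP_S390_IRQCHIP (added in this merge) and
	 * create the FLIC device.  Returns the device fd used for
	 * KVM_GET/SET_DEVICE_ATTR, or -1 on error. */
	static int setup_s390_irqchip(int vm_fd)
	{
		struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };
		struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };

		if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
			return -1;
		if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
			return -1;
		return cd.fd;
	}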
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/Kconfig     |   4
-rw-r--r--  arch/s390/kvm/Makefile    |   2
-rw-r--r--  arch/s390/kvm/diag.c      |  84
-rw-r--r--  arch/s390/kvm/interrupt.c | 704
-rw-r--r--  arch/s390/kvm/irq.h       |  22
-rw-r--r--  arch/s390/kvm/kvm-s390.c  | 212
-rw-r--r--  arch/s390/kvm/kvm-s390.h  |   7
-rw-r--r--  arch/s390/kvm/priv.c      |   7
-rw-r--r--  arch/s390/kvm/sigp.c      | 157
-rw-r--r--  arch/s390/kvm/trace.h     |  46
10 files changed, 1073 insertions(+), 172 deletions(-)
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 70b46eacf8e1..10d529ac9821 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -23,6 +23,10 @@ config KVM
 	select ANON_INODES
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select HAVE_KVM_EVENTFD
+	select KVM_ASYNC_PF
+	select KVM_ASYNC_PF_SYNC
+	select HAVE_KVM_IRQCHIP
+	select HAVE_KVM_IRQ_ROUTING
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 40b4c6470f88..d3adb37e93a4 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -7,7 +7,7 @@
 # as published by the Free Software Foundation.
 
 KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 6f9cfa500372..03a05ffb662f 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -18,6 +18,7 @@
 #include "kvm-s390.h"
 #include "trace.h"
 #include "trace-s390.h"
+#include "gaccess.h"
 
 static int diag_release_pages(struct kvm_vcpu *vcpu)
 {
@@ -47,6 +48,87 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
+{
+	struct prs_parm {
+		u16 code;
+		u16 subcode;
+		u16 parm_len;
+		u16 parm_version;
+		u64 token_addr;
+		u64 select_mask;
+		u64 compare_mask;
+		u64 zarch;
+	};
+	struct prs_parm parm;
+	int rc;
+	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
+	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
+	unsigned long hva_token = KVM_HVA_ERR_BAD;
+
+	if (vcpu->run->s.regs.gprs[rx] & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	if (copy_from_guest(vcpu, &parm, vcpu->run->s.regs.gprs[rx], sizeof(parm)))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	switch (parm.subcode) {
+	case 0: /* TOKEN */
+		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
+			/*
+			 * If the pagefault handshake is already activated,
+			 * the token must not be changed. We have to return
+			 * decimal 8 instead, as mandated in SC24-6084.
+			 */
+			vcpu->run->s.regs.gprs[ry] = 8;
+			return 0;
+		}
+
+		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
+		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
+			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+		hva_token = gfn_to_hva(vcpu->kvm, gpa_to_gfn(parm.token_addr));
+		if (kvm_is_error_hva(hva_token))
+			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+		vcpu->arch.pfault_token = parm.token_addr;
+		vcpu->arch.pfault_select = parm.select_mask;
+		vcpu->arch.pfault_compare = parm.compare_mask;
+		vcpu->run->s.regs.gprs[ry] = 0;
+		rc = 0;
+		break;
+	case 1: /*
+		 * CANCEL
+		 * Specification allows to let already pending tokens survive
+		 * the cancel, therefore to reduce code complexity, we assume
+		 * all outstanding tokens are already pending.
+		 */
+		if (parm.token_addr || parm.select_mask ||
+		    parm.compare_mask || parm.zarch)
+			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+		vcpu->run->s.regs.gprs[ry] = 0;
+		/*
+		 * If the pfault handling was not established or is already
+		 * canceled SC24-6084 requests to return decimal 4.
+		 */
+		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+			vcpu->run->s.regs.gprs[ry] = 4;
+		else
+			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+
+		rc = 0;
+		break;
+	default:
+		rc = -EOPNOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+
 static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
@@ -153,6 +235,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 		return __diag_time_slice_end(vcpu);
 	case 0x9c:
 		return __diag_time_slice_end_directed(vcpu);
+	case 0x258:
+		return __diag_page_ref_service(vcpu);
 	case 0x308:
 		return __diag_ipl_functions(vcpu);
 	case 0x500:
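For orientation, the parameter block a guest passes to DIAG 0x258 mirrors
the prs_parm layout validated above. A hedged guest-side sketch follows
(illustrative only; the real Linux guest implementation lives elsewhere in
arch/s390/, and the field names here simply follow the handler):

	/* Guest view of the DIAG 0x258 TOKEN parameter block, per the
	 * checks in __diag_page_ref_service().  The guest issues
	 * "diag rx,ry,0x258" with gpr rx holding the doubleword-aligned
	 * address of this block; the status code (0, 4 or 8 per
	 * SC24-6084) comes back in gpr ry. */
	struct diag258_parm {
		u16 code;          /* must be 0x258 */
		u16 subcode;       /* 0 = TOKEN, 1 = CANCEL */
		u16 parm_len;      /* in doublewords, must be >= 5 */
		u16 parm_version;  /* must be 2 */
		u64 token_addr;    /* 8-byte aligned guest address of token */
		u64 select_mask;   /* PSW bits checked before delivery */
		u64 compare_mask;  /* compare & select must equal compare */
		u64 zarch;         /* must be 0x8000000000000000ULL */
	} __attribute__((packed));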
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5f79d2d79ca7..200a8f9390b6 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,7 +1,7 @@
 /*
  * handling kvm guest interrupts
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2014
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -13,6 +13,7 @@
 #include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <linux/hrtimer.h>
+#include <linux/mmu_context.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <asm/asm-offsets.h>
@@ -31,7 +32,7 @@ static int is_ioint(u64 type)
 	return ((type & 0xfffe0000u) != 0xfffe0000u);
 }
 
-static int psw_extint_disabled(struct kvm_vcpu *vcpu)
+int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
 }
@@ -78,11 +79,8 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 			return 1;
 		return 0;
 	case KVM_S390_INT_SERVICE:
-		if (psw_extint_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
-			return 1;
-		return 0;
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
 		if (psw_extint_disabled(vcpu))
 			return 0;
@@ -117,14 +115,12 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
 	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
 	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
@@ -150,6 +146,8 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 	case KVM_S390_INT_EXTERNAL_CALL:
 	case KVM_S390_INT_EMERGENCY:
 	case KVM_S390_INT_SERVICE:
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
 		if (psw_extint_disabled(vcpu))
 			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
@@ -223,6 +221,30 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		rc |= put_guest(vcpu, inti->ext.ext_params,
 				(u32 __user *)__LC_EXT_PARAMS);
 		break;
+	case KVM_S390_INT_PFAULT_INIT:
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
+						 inti->ext.ext_params2);
+		rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, 0x0600, (u16 __user *) __LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest(vcpu, inti->ext.ext_params2,
+				(u64 __user *) __LC_EXT_PARAMS2);
+		break;
+	case KVM_S390_INT_PFAULT_DONE:
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
+						 inti->ext.ext_params2);
+		rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, 0x0680, (u16 __user *) __LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest(vcpu, inti->ext.ext_params2,
+				(u64 __user *) __LC_EXT_PARAMS2);
+		break;
 	case KVM_S390_INT_VIRTIO:
 		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
@@ -357,7 +379,7 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
@@ -482,11 +504,26 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+	vcpu->preempted = true;
 	tasklet_schedule(&vcpu->arch.tasklet);
 
 	return HRTIMER_NORESTART;
 }
 
+void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_interrupt_info *n, *inti = NULL;
+
+	spin_lock_bh(&li->lock);
+	list_for_each_entry_safe(inti, n, &li->list, list) {
+		list_del(&inti->list);
+		kfree(inti);
+	}
+	atomic_set(&li->active, 0);
+	spin_unlock_bh(&li->lock);
+}
+
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -528,6 +565,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	list_for_each_entry_safe(inti, n, &fi->list, list) {
 		if (__interrupt_is_deliverable(vcpu, inti)) {
 			list_del(&inti->list);
+			fi->irq_count--;
 			deliver = 1;
 			break;
 		}
@@ -583,6 +621,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
 		if ((inti->type == KVM_S390_MCHK) &&
 		    __interrupt_is_deliverable(vcpu, inti)) {
 			list_del(&inti->list);
+			fi->irq_count--;
 			deliver = 1;
 			break;
 		}
@@ -650,8 +689,10 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 			inti = iter;
 			break;
 		}
-	if (inti)
+	if (inti) {
 		list_del_init(&inti->list);
+		fi->irq_count--;
+	}
 	if (list_empty(&fi->list))
 		atomic_set(&fi->active, 0);
 	spin_unlock(&fi->lock);
@@ -659,53 +700,101 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 	return inti;
 }
 
-int kvm_s390_inject_vm(struct kvm *kvm,
-		       struct kvm_s390_interrupt *s390int)
+static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 {
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_float_interrupt *fi;
-	struct kvm_s390_interrupt_info *inti, *iter;
+	struct kvm_s390_interrupt_info *iter;
+	struct kvm_vcpu *dst_vcpu = NULL;
 	int sigcpu;
+	int rc = 0;
+
+	mutex_lock(&kvm->lock);
+	fi = &kvm->arch.float_int;
+	spin_lock(&fi->lock);
+	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
+		rc = -EINVAL;
+		goto unlock_fi;
+	}
+	fi->irq_count++;
+	if (!is_ioint(inti->type)) {
+		list_add_tail(&inti->list, &fi->list);
+	} else {
+		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+
+		/* Keep I/O interrupts sorted in isc order. */
+		list_for_each_entry(iter, &fi->list, list) {
+			if (!is_ioint(iter->type))
+				continue;
+			if (int_word_to_isc_bits(iter->io.io_int_word)
+			    <= isc_bits)
+				continue;
+			break;
+		}
+		list_add_tail(&inti->list, &iter->list);
+	}
+	atomic_set(&fi->active, 1);
+	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
+	if (sigcpu == KVM_MAX_VCPUS) {
+		do {
+			sigcpu = fi->next_rr_cpu++;
+			if (sigcpu == KVM_MAX_VCPUS)
+				sigcpu = fi->next_rr_cpu = 0;
+		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
+	}
+	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+	li = &dst_vcpu->arch.local_int;
+	spin_lock_bh(&li->lock);
+	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
+	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
+	spin_unlock_bh(&li->lock);
+unlock_fi:
+	spin_unlock(&fi->lock);
+	mutex_unlock(&kvm->lock);
+	return rc;
+}
+
+int kvm_s390_inject_vm(struct kvm *kvm,
+		       struct kvm_s390_interrupt *s390int)
+{
+	struct kvm_s390_interrupt_info *inti;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
 		return -ENOMEM;
 
-	switch (s390int->type) {
+	inti->type = s390int->type;
+	switch (inti->type) {
 	case KVM_S390_INT_VIRTIO:
 		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
 			 s390int->parm, s390int->parm64);
-		inti->type = s390int->type;
 		inti->ext.ext_params = s390int->parm;
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_INT_SERVICE:
 		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
-		inti->type = s390int->type;
 		inti->ext.ext_params = s390int->parm;
 		break;
-	case KVM_S390_PROGRAM_INT:
-	case KVM_S390_SIGP_STOP:
-	case KVM_S390_INT_EXTERNAL_CALL:
-	case KVM_S390_INT_EMERGENCY:
-		kfree(inti);
-		return -EINVAL;
+	case KVM_S390_INT_PFAULT_DONE:
+		inti->type = s390int->type;
+		inti->ext.ext_params2 = s390int->parm64;
+		break;
 	case KVM_S390_MCHK:
 		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
 			 s390int->parm64);
-		inti->type = s390int->type;
 		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
 		inti->mchk.mcic = s390int->parm64;
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		if (s390int->type & IOINT_AI_MASK)
+		if (inti->type & IOINT_AI_MASK)
 			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
 		else
 			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
 				 s390int->type & IOINT_CSSID_MASK,
 				 s390int->type & IOINT_SSID_MASK,
 				 s390int->type & IOINT_SCHID_MASK);
-		inti->type = s390int->type;
 		inti->io.subchannel_id = s390int->parm >> 16;
 		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
 		inti->io.io_int_parm = s390int->parm64 >> 32;
@@ -718,43 +807,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
 				 2);
 
-	mutex_lock(&kvm->lock);
-	fi = &kvm->arch.float_int;
-	spin_lock(&fi->lock);
-	if (!is_ioint(inti->type))
-		list_add_tail(&inti->list, &fi->list);
-	else {
-		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
-
-		/* Keep I/O interrupts sorted in isc order. */
-		list_for_each_entry(iter, &fi->list, list) {
-			if (!is_ioint(iter->type))
-				continue;
-			if (int_word_to_isc_bits(iter->io.io_int_word)
-			    <= isc_bits)
-				continue;
-			break;
-		}
-		list_add_tail(&inti->list, &iter->list);
-	}
-	atomic_set(&fi->active, 1);
-	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
-	if (sigcpu == KVM_MAX_VCPUS) {
-		do {
-			sigcpu = fi->next_rr_cpu++;
-			if (sigcpu == KVM_MAX_VCPUS)
-				sigcpu = fi->next_rr_cpu = 0;
-		} while (fi->local_int[sigcpu] == NULL);
-	}
-	li = fi->local_int[sigcpu];
-	spin_lock_bh(&li->lock);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	spin_unlock_bh(&li->lock);
-	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
-	return 0;
+	return __inject_vm(kvm, inti);
 }
 
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
@@ -814,6 +867,10 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 		inti->type = s390int->type;
 		inti->mchk.mcic = s390int->parm64;
 		break;
+	case KVM_S390_INT_PFAULT_INIT:
+		inti->type = s390int->type;
+		inti->ext.ext_params2 = s390int->parm64;
+		break;
 	case KVM_S390_INT_VIRTIO:
 	case KVM_S390_INT_SERVICE:
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
@@ -837,7 +894,528 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
+	vcpu->preempted = true;
 	spin_unlock_bh(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
 	return 0;
 }
+
+static void clear_floating_interrupts(struct kvm *kvm)
+{
+	struct kvm_s390_float_interrupt *fi;
+	struct kvm_s390_interrupt_info *n, *inti = NULL;
+
+	mutex_lock(&kvm->lock);
+	fi = &kvm->arch.float_int;
+	spin_lock(&fi->lock);
+	list_for_each_entry_safe(inti, n, &fi->list, list) {
+		list_del(&inti->list);
+		kfree(inti);
+	}
+	fi->irq_count = 0;
+	atomic_set(&fi->active, 0);
+	spin_unlock(&fi->lock);
+	mutex_unlock(&kvm->lock);
+}
+
+static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
+				   u8 *addr)
+{
+	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
+	struct kvm_s390_irq irq = {0};
+
+	irq.type = inti->type;
+	switch (inti->type) {
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
+	case KVM_S390_INT_VIRTIO:
+	case KVM_S390_INT_SERVICE:
+		irq.u.ext = inti->ext;
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		irq.u.io = inti->io;
+		break;
+	case KVM_S390_MCHK:
+		irq.u.mchk = inti->mchk;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (copy_to_user(uptr, &irq, sizeof(irq)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
+{
+	struct kvm_s390_interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi;
+	int ret = 0;
+	int n = 0;
+
+	mutex_lock(&kvm->lock);
+	fi = &kvm->arch.float_int;
+	spin_lock(&fi->lock);
+
+	list_for_each_entry(inti, &fi->list, list) {
+		if (len < sizeof(struct kvm_s390_irq)) {
+			/* signal userspace to try again */
+			ret = -ENOMEM;
+			break;
+		}
+		ret = copy_irq_to_user(inti, buf);
+		if (ret)
+			break;
+		buf += sizeof(struct kvm_s390_irq);
+		len -= sizeof(struct kvm_s390_irq);
+		n++;
+	}
+
+	spin_unlock(&fi->lock);
+	mutex_unlock(&kvm->lock);
+
+	return ret < 0 ? ret : n;
+}
+
+static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	int r;
+
+	switch (attr->group) {
+	case KVM_DEV_FLIC_GET_ALL_IRQS:
+		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
+					  attr->attr);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
+				     u64 addr)
+{
+	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
+	void *target = NULL;
+	void __user *source;
+	u64 size;
+
+	if (get_user(inti->type, (u64 __user *)addr))
+		return -EFAULT;
+
+	switch (inti->type) {
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
+	case KVM_S390_INT_VIRTIO:
+	case KVM_S390_INT_SERVICE:
+		target = (void *) &inti->ext;
+		source = &uptr->u.ext;
+		size = sizeof(inti->ext);
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		target = (void *) &inti->io;
+		source = &uptr->u.io;
+		size = sizeof(inti->io);
+		break;
+	case KVM_S390_MCHK:
+		target = (void *) &inti->mchk;
+		source = &uptr->u.mchk;
+		size = sizeof(inti->mchk);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (copy_from_user(target, source, size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int enqueue_floating_irq(struct kvm_device *dev,
+				struct kvm_device_attr *attr)
+{
+	struct kvm_s390_interrupt_info *inti = NULL;
+	int r = 0;
+	int len = attr->attr;
+
+	if (len % sizeof(struct kvm_s390_irq) != 0)
+		return -EINVAL;
+	else if (len > KVM_S390_FLIC_MAX_BUFFER)
+		return -EINVAL;
+
+	while (len >= sizeof(struct kvm_s390_irq)) {
+		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+		if (!inti)
+			return -ENOMEM;
+
+		r = copy_irq_from_user(inti, attr->addr);
+		if (r) {
+			kfree(inti);
+			return r;
+		}
+		r = __inject_vm(dev->kvm, inti);
+		if (r) {
+			kfree(inti);
+			return r;
+		}
+		len -= sizeof(struct kvm_s390_irq);
+		attr->addr += sizeof(struct kvm_s390_irq);
+	}
+
+	return r;
+}
+
+static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
+{
+	if (id >= MAX_S390_IO_ADAPTERS)
+		return NULL;
+	return kvm->arch.adapters[id];
+}
+
+static int register_io_adapter(struct kvm_device *dev,
+			       struct kvm_device_attr *attr)
+{
+	struct s390_io_adapter *adapter;
+	struct kvm_s390_io_adapter adapter_info;
+
+	if (copy_from_user(&adapter_info,
+			   (void __user *)attr->addr, sizeof(adapter_info)))
+		return -EFAULT;
+
+	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
+	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
+		return -EINVAL;
+
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&adapter->maps);
+	init_rwsem(&adapter->maps_lock);
+	atomic_set(&adapter->nr_maps, 0);
+	adapter->id = adapter_info.id;
+	adapter->isc = adapter_info.isc;
+	adapter->maskable = adapter_info.maskable;
+	adapter->masked = false;
+	adapter->swap = adapter_info.swap;
+	dev->kvm->arch.adapters[adapter->id] = adapter;
+
+	return 0;
+}
+
+int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
+{
+	int ret;
+	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
+
+	if (!adapter || !adapter->maskable)
+		return -EINVAL;
+	ret = adapter->masked;
+	adapter->masked = masked;
+	return ret;
+}
+
+static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
+{
+	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
+	struct s390_map_info *map;
+	int ret;
+
+	if (!adapter || !addr)
+		return -EINVAL;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	INIT_LIST_HEAD(&map->list);
+	map->guest_addr = addr;
+	map->addr = gmap_translate(addr, kvm->arch.gmap);
+	if (map->addr == -EFAULT) {
+		ret = -EFAULT;
+		goto out;
+	}
+	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
+	if (ret < 0)
+		goto out;
+	BUG_ON(ret != 1);
+	down_write(&adapter->maps_lock);
+	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
+		list_add_tail(&map->list, &adapter->maps);
+		ret = 0;
+	} else {
+		put_page(map->page);
+		ret = -EINVAL;
+	}
+	up_write(&adapter->maps_lock);
+out:
+	if (ret)
+		kfree(map);
+	return ret;
+}
+
+static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
+{
+	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
+	struct s390_map_info *map, *tmp;
+	int found = 0;
+
+	if (!adapter || !addr)
+		return -EINVAL;
+
+	down_write(&adapter->maps_lock);
+	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
+		if (map->guest_addr == addr) {
+			found = 1;
+			atomic_dec(&adapter->nr_maps);
+			list_del(&map->list);
+			put_page(map->page);
+			kfree(map);
+			break;
+		}
+	}
+	up_write(&adapter->maps_lock);
+
+	return found ? 0 : -EINVAL;
+}
+
+void kvm_s390_destroy_adapters(struct kvm *kvm)
+{
+	int i;
+	struct s390_map_info *map, *tmp;
+
+	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
+		if (!kvm->arch.adapters[i])
+			continue;
+		list_for_each_entry_safe(map, tmp,
+					 &kvm->arch.adapters[i]->maps, list) {
+			list_del(&map->list);
+			put_page(map->page);
+			kfree(map);
+		}
+		kfree(kvm->arch.adapters[i]);
+	}
+}
+
+static int modify_io_adapter(struct kvm_device *dev,
+			     struct kvm_device_attr *attr)
+{
+	struct kvm_s390_io_adapter_req req;
+	struct s390_io_adapter *adapter;
+	int ret;
+
+	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
+		return -EFAULT;
+
+	adapter = get_io_adapter(dev->kvm, req.id);
+	if (!adapter)
+		return -EINVAL;
+	switch (req.type) {
+	case KVM_S390_IO_ADAPTER_MASK:
+		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
+		if (ret > 0)
+			ret = 0;
+		break;
+	case KVM_S390_IO_ADAPTER_MAP:
+		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
+		break;
+	case KVM_S390_IO_ADAPTER_UNMAP:
+		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	int r = 0;
+	unsigned int i;
+	struct kvm_vcpu *vcpu;
+
+	switch (attr->group) {
+	case KVM_DEV_FLIC_ENQUEUE:
+		r = enqueue_floating_irq(dev, attr);
+		break;
+	case KVM_DEV_FLIC_CLEAR_IRQS:
+		r = 0;
+		clear_floating_interrupts(dev->kvm);
+		break;
+	case KVM_DEV_FLIC_APF_ENABLE:
+		dev->kvm->arch.gmap->pfault_enabled = 1;
+		break;
+	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
+		dev->kvm->arch.gmap->pfault_enabled = 0;
+		/*
+		 * Make sure no async faults are in transition when
+		 * clearing the queues. So we don't need to worry
+		 * about late coming workers.
+		 */
+		synchronize_srcu(&dev->kvm->srcu);
+		kvm_for_each_vcpu(i, vcpu, dev->kvm)
+			kvm_clear_async_pf_completion_queue(vcpu);
+		break;
+	case KVM_DEV_FLIC_ADAPTER_REGISTER:
+		r = register_io_adapter(dev, attr);
+		break;
+	case KVM_DEV_FLIC_ADAPTER_MODIFY:
+		r = modify_io_adapter(dev, attr);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+static int flic_create(struct kvm_device *dev, u32 type)
+{
+	if (!dev)
+		return -EINVAL;
+	if (dev->kvm->arch.flic)
+		return -EINVAL;
+	dev->kvm->arch.flic = dev;
+	return 0;
+}
+
+static void flic_destroy(struct kvm_device *dev)
+{
+	dev->kvm->arch.flic = NULL;
+	kfree(dev);
+}
+
+/* s390 floating irq controller (flic) */
+struct kvm_device_ops kvm_flic_ops = {
+	.name = "kvm-flic",
+	.get_attr = flic_get_attr,
+	.set_attr = flic_set_attr,
+	.create = flic_create,
+	.destroy = flic_destroy,
+};
+
+static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
+{
+	unsigned long bit;
+
+	bit = bit_nr + (addr % PAGE_SIZE) * 8;
+
+	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
+}
+
+static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
+					  u64 addr)
+{
+	struct s390_map_info *map;
+
+	if (!adapter)
+		return NULL;
+
+	list_for_each_entry(map, &adapter->maps, list) {
+		if (map->guest_addr == addr)
+			return map;
+	}
+	return NULL;
+}
+
+static int adapter_indicators_set(struct kvm *kvm,
+				  struct s390_io_adapter *adapter,
+				  struct kvm_s390_adapter_int *adapter_int)
+{
+	unsigned long bit;
+	int summary_set, idx;
+	struct s390_map_info *info;
+	void *map;
+
+	info = get_map_info(adapter, adapter_int->ind_addr);
+	if (!info)
+		return -1;
+	map = page_address(info->page);
+	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
+	set_bit(bit, map);
+	idx = srcu_read_lock(&kvm->srcu);
+	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
+	set_page_dirty_lock(info->page);
+	info = get_map_info(adapter, adapter_int->summary_addr);
+	if (!info) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -1;
+	}
+	map = page_address(info->page);
+	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
+			  adapter->swap);
+	summary_set = test_and_set_bit(bit, map);
+	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
+	set_page_dirty_lock(info->page);
+	srcu_read_unlock(&kvm->srcu, idx);
+	return summary_set ? 0 : 1;
+}
+
+/*
+ * < 0 - not injected due to error
+ * = 0 - coalesced, summary indicator already active
+ * > 0 - injected interrupt
+ */
+static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
+			   struct kvm *kvm, int irq_source_id, int level,
+			   bool line_status)
+{
+	int ret;
+	struct s390_io_adapter *adapter;
+
+	/* We're only interested in the 0->1 transition. */
+	if (!level)
+		return 0;
+	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
+	if (!adapter)
+		return -1;
+	down_read(&adapter->maps_lock);
+	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
+	up_read(&adapter->maps_lock);
+	if ((ret > 0) && !adapter->masked) {
+		struct kvm_s390_interrupt s390int = {
+			.type = KVM_S390_INT_IO(1, 0, 0, 0),
+			.parm = 0,
+			.parm64 = (adapter->isc << 27) | 0x80000000,
+		};
+		ret = kvm_s390_inject_vm(kvm, &s390int);
+		if (ret == 0)
+			ret = 1;
+	}
+	return ret;
+}
+
+int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+			  struct kvm_kernel_irq_routing_entry *e,
+			  const struct kvm_irq_routing_entry *ue)
+{
+	int ret;
+
+	switch (ue->type) {
+	case KVM_IRQ_ROUTING_S390_ADAPTER:
+		e->set = set_adapter_int;
+		e->adapter.summary_addr = ue->u.adapter.summary_addr;
+		e->adapter.ind_addr = ue->u.adapter.ind_addr;
+		e->adapter.summary_offset = ue->u.adapter.summary_offset;
+		e->adapter.ind_offset = ue->u.adapter.ind_offset;
+		e->adapter.adapter_id = ue->u.adapter.adapter_id;
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+		int irq_source_id, int level, bool line_status)
+{
+	return -EINVAL;
+}
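To exercise the adapter interrupt path added above, userspace registers an
adapter with the FLIC and then installs a KVM_IRQ_ROUTING_S390_ADAPTER
routing entry. A sketch under the assumption that vm_fd and flic_fd already
exist and that the ids and guest addresses are purely illustrative:

	/* Sketch: register adapter 0 with the FLIC, then route GSI 0 to
	 * its indicator bits.  Triggering the GSI (e.g. via an irqfd)
	 * ends up in set_adapter_int() above.  Error handling elided. */
	static int route_adapter_irq(int vm_fd, int flic_fd)
	{
		struct kvm_s390_io_adapter adapter = {
			.id = 0,
			.isc = 3,	/* interruption subclass, example value */
			.maskable = 1,
		};
		struct kvm_device_attr attr = {
			.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
			.addr = (__u64) &adapter,
		};
		struct {
			struct kvm_irq_routing info;
			struct kvm_irq_routing_entry entry;
		} routing = {
			.info = { .nr = 1 },
			.entry = {
				.gsi = 0,
				.type = KVM_IRQ_ROUTING_S390_ADAPTER,
				.u.adapter = {
					.ind_addr = 0x10000,	/* example guest addresses */
					.summary_addr = 0x11000,
					.ind_offset = 0,
					.summary_offset = 7,
					.adapter_id = 0,
				},
			},
		};

		if (ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
			return -1;
		return ioctl(vm_fd, KVM_SET_GSI_ROUTING, &routing);
	}

The indicator and summary pages would also have to be pinned beforehand via
KVM_S390_IO_ADAPTER_MAP requests (handled by modify_io_adapter() above),
otherwise adapter_indicators_set() fails to find a mapping.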
diff --git a/arch/s390/kvm/irq.h b/arch/s390/kvm/irq.h
new file mode 100644
index 000000000000..d98e4159643d
--- /dev/null
+++ b/arch/s390/kvm/irq.h
@@ -0,0 +1,22 @@
+/*
+ * s390 irqchip routines
+ *
+ * Copyright IBM Corp. 2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+#ifndef __KVM_IRQ_H
+#define __KVM_IRQ_H
+
+#include <linux/kvm_host.h>
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	return 1;
+}
+
+#endif
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 10b5db3c9bc4..b3ecb8f5b6ce 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -153,11 +153,14 @@ int kvm_dev_ioctl_check_extension(long ext)
 #ifdef CONFIG_KVM_S390_UCONTROL
 	case KVM_CAP_S390_UCONTROL:
 #endif
+	case KVM_CAP_ASYNC_PF:
 	case KVM_CAP_SYNC_REGS:
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ENABLE_CAP:
 	case KVM_CAP_S390_CSS_SUPPORT:
 	case KVM_CAP_IOEVENTFD:
+	case KVM_CAP_DEVICE_CTRL:
+	case KVM_CAP_ENABLE_CAP_VM:
 		r = 1;
 		break;
 	case KVM_CAP_NR_VCPUS:
@@ -186,6 +189,25 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	return 0;
 }
 
+static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+	int r;
+
+	if (cap->flags)
+		return -EINVAL;
+
+	switch (cap->cap) {
+	case KVM_CAP_S390_IRQCHIP:
+		kvm->arch.use_irqchip = 1;
+		r = 0;
+		break;
+	default:
+		r = -EINVAL;
+		break;
+	}
+	return r;
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg)
 {
@@ -203,6 +225,26 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_s390_inject_vm(kvm, &s390int);
 		break;
 	}
+	case KVM_ENABLE_CAP: {
+		struct kvm_enable_cap cap;
+		r = -EFAULT;
+		if (copy_from_user(&cap, argp, sizeof(cap)))
+			break;
+		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
+		break;
+	}
+	case KVM_CREATE_IRQCHIP: {
+		struct kvm_irq_routing_entry routing;
+
+		r = -EINVAL;
+		if (kvm->arch.use_irqchip) {
+			/* Set up dummy routing. */
+			memset(&routing, 0, sizeof(routing));
+			kvm_set_irq_routing(kvm, &routing, 0, 0);
+			r = 0;
+		}
+		break;
+	}
 	default:
 		r = -ENOTTY;
 	}
@@ -214,6 +256,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	int rc;
 	char debug_name[16];
+	static unsigned long sca_offset;
 
 	rc = -EINVAL;
 #ifdef CONFIG_KVM_S390_UCONTROL
@@ -235,6 +278,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
 	if (!kvm->arch.sca)
 		goto out_err;
+	spin_lock(&kvm_lock);
+	sca_offset = (sca_offset + 16) & 0x7f0;
+	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
+	spin_unlock(&kvm_lock);
 
 	sprintf(debug_name, "kvm-%u", current->pid);
 
@@ -255,9 +302,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		if (!kvm->arch.gmap)
 			goto out_nogmap;
 		kvm->arch.gmap->private = kvm;
+		kvm->arch.gmap->pfault_enabled = 0;
 	}
 
 	kvm->arch.css_support = 0;
+	kvm->arch.use_irqchip = 0;
 
 	return 0;
 out_nogmap:
@@ -272,6 +321,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
+	kvm_clear_async_pf_completion_queue(vcpu);
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
 		clear_bit(63 - vcpu->vcpu_id,
 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
@@ -320,11 +370,14 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	debug_unregister(kvm->arch.dbf);
 	if (!kvm_is_ucontrol(kvm))
 		gmap_free(kvm->arch.gmap);
+	kvm_s390_destroy_adapters(kvm);
 }
 
 /* Section: vcpu related */
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	kvm_clear_async_pf_completion_queue(vcpu);
 	if (kvm_is_ucontrol(vcpu->kvm)) {
 		vcpu->arch.gmap = gmap_alloc(current->mm);
 		if (!vcpu->arch.gmap)
@@ -385,7 +438,11 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.guest_fpregs.fpc = 0;
 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
 	vcpu->arch.sie_block->gbea = 1;
+	vcpu->arch.sie_block->pp = 0;
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	kvm_clear_async_pf_completion_queue(vcpu);
 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	kvm_s390_clear_local_irqs(vcpu);
 }
 
 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -466,11 +523,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-	spin_lock(&kvm->arch.float_int.lock);
-	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
 	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-	spin_unlock(&kvm->arch.float_int.lock);
 
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
@@ -490,9 +544,7 @@ out:
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	/* kvm common code refers to this, but never calls it */
-	BUG();
-	return 0;
+	return kvm_cpu_has_interrupt(vcpu);
 }
 
 void s390_vcpu_block(struct kvm_vcpu *vcpu)
@@ -568,6 +620,26 @@ static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
 		r = put_user(vcpu->arch.sie_block->ckc,
 			     (u64 __user *)reg->addr);
 		break;
+	case KVM_REG_S390_PFTOKEN:
+		r = put_user(vcpu->arch.pfault_token,
+			     (u64 __user *)reg->addr);
+		break;
+	case KVM_REG_S390_PFCOMPARE:
+		r = put_user(vcpu->arch.pfault_compare,
+			     (u64 __user *)reg->addr);
+		break;
+	case KVM_REG_S390_PFSELECT:
+		r = put_user(vcpu->arch.pfault_select,
+			     (u64 __user *)reg->addr);
+		break;
+	case KVM_REG_S390_PP:
+		r = put_user(vcpu->arch.sie_block->pp,
+			     (u64 __user *)reg->addr);
+		break;
+	case KVM_REG_S390_GBEA:
+		r = put_user(vcpu->arch.sie_block->gbea,
+			     (u64 __user *)reg->addr);
+		break;
 	default:
 		break;
 	}
@@ -597,6 +669,26 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
 		r = get_user(vcpu->arch.sie_block->ckc,
 			     (u64 __user *)reg->addr);
 		break;
+	case KVM_REG_S390_PFTOKEN:
+		r = get_user(vcpu->arch.pfault_token,
+			     (u64 __user *)reg->addr);
+		break;
+	case KVM_REG_S390_PFCOMPARE:
+		r = get_user(vcpu->arch.pfault_compare,
+			     (u64 __user *)reg->addr);
+		break;
+	case KVM_REG_S390_PFSELECT:
+		r = get_user(vcpu->arch.pfault_select,
+			     (u64 __user *)reg->addr);
+		break;
+	case KVM_REG_S390_PP:
+		r = get_user(vcpu->arch.sie_block->pp,
+			     (u64 __user *)reg->addr);
+		break;
+	case KVM_REG_S390_GBEA:
+		r = get_user(vcpu->arch.sie_block->gbea,
+			     (u64 __user *)reg->addr);
+		break;
 	default:
 		break;
 	}
@@ -715,10 +807,100 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
+{
+	long rc;
+	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
+	struct mm_struct *mm = current->mm;
+	down_read(&mm->mmap_sem);
+	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
+	up_read(&mm->mmap_sem);
+	return rc;
+}
+
+static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
+				      unsigned long token)
+{
+	struct kvm_s390_interrupt inti;
+	inti.parm64 = token;
+
+	if (start_token) {
+		inti.type = KVM_S390_INT_PFAULT_INIT;
+		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
+	} else {
+		inti.type = KVM_S390_INT_PFAULT_DONE;
+		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
+	}
+}
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work)
+{
+	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
+	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
+}
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work)
+{
+	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
+	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
+}
+
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work)
+{
+	/* s390 will always inject the page directly */
+}
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * s390 will always inject the page directly,
+	 * but we still want check_async_completion to cleanup
+	 */
+	return true;
+}
+
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+{
+	hva_t hva;
+	struct kvm_arch_async_pf arch;
+	int rc;
+
+	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+		return 0;
+	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
+	    vcpu->arch.pfault_compare)
+		return 0;
+	if (psw_extint_disabled(vcpu))
+		return 0;
+	if (kvm_cpu_has_interrupt(vcpu))
+		return 0;
+	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+		return 0;
+	if (!vcpu->arch.gmap->pfault_enabled)
+		return 0;
+
+	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
+	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
+		return 0;
+
+	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
+	return rc;
+}
+
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
 	int rc, cpuflags;
 
+	/*
+	 * On s390 notifications for arriving pages will be delivered directly
+	 * to the guest but the house keeping for completed pfaults is
+	 * handled outside the worker.
+	 */
+	kvm_check_async_pf_completion(vcpu);
+
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
 	if (need_resched())
@@ -744,7 +926,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 {
-	int rc;
+	int rc = -1;
 
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
@@ -758,7 +940,16 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 			current->thread.gmap_addr;
 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
 		rc = -EREMOTE;
-	} else {
+
+	} else if (current->thread.gmap_pfault) {
+		trace_kvm_s390_major_guest_pfault(vcpu);
+		current->thread.gmap_pfault = 0;
+		if (kvm_arch_setup_async_pf(vcpu) ||
+		    (kvm_arch_fault_in_sync(vcpu) >= 0))
+			rc = 0;
+	}
+
+	if (rc == -1) {
 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
 		trace_kvm_s390_sie_fault(vcpu);
 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
@@ -768,7 +959,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 
 	if (rc == 0) {
 		if (kvm_is_ucontrol(vcpu->kvm))
-			rc = -EOPNOTSUPP;
+			/* Don't exit for host interrupts. */
+			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
 		else
 			rc = kvm_handle_sie_intercept(vcpu);
 	}
@@ -831,8 +1023,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
-	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
-
 	switch (kvm_run->exit_reason) {
 	case KVM_EXIT_S390_SIEIC:
 	case KVM_EXIT_UNKNOWN:
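The new one_reg ids above make the pfault handshake state migratable. A
minimal sketch of saving it with KVM_GET_ONE_REG (vcpu_fd is assumed to come
from KVM_CREATE_VCPU; error handling elided):

	/* Sketch: read the async-pfault handshake registers added in this
	 * merge, e.g. on the source side of a migration. */
	static int save_pfault_state(int vcpu_fd, __u64 state[3])
	{
		const __u64 ids[3] = {
			KVM_REG_S390_PFTOKEN,
			KVM_REG_S390_PFCOMPARE,
			KVM_REG_S390_PFSELECT,
		};
		int i;

		for (i = 0; i < 3; i++) {
			struct kvm_one_reg reg = {
				.id = ids[i],
				.addr = (__u64) &state[i],
			};
			if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
				return -1;
		}
		return 0;
	}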
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 564514f410f4..3c1e2274d9ea 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -129,6 +129,7 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
 void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
+void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
 int __must_check kvm_s390_inject_vm(struct kvm *kvm,
 				    struct kvm_s390_interrupt *s390int);
 int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
@@ -136,6 +137,7 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 						    u64 cr6, u64 schid);
+int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
 
 /* implemented in priv.c */
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
@@ -161,4 +163,9 @@ bool kvm_enabled_cmma(void);
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 
+/* implemented in interrupt.c */
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int psw_extint_disabled(struct kvm_vcpu *vcpu);
+void kvm_s390_destroy_adapters(struct kvm *kvm);
+
 #endif
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index aacb6b129914..476e9e218f43 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -396,15 +396,10 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 
 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int cpus = 0;
 	int n;
 
-	spin_lock(&fi->lock);
-	for (n = 0; n < KVM_MAX_VCPUS; n++)
-		if (fi->local_int[n])
-			cpus++;
-	spin_unlock(&fi->lock);
+	cpus = atomic_read(&vcpu->kvm->online_vcpus);
 
 	/* deal with other level 3 hypervisors */
 	if (stsi(mem, 3, 2, 2))
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 87c2b3a3bd3e..26caeb530a78 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -23,29 +23,30 @@
 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 			u64 *reg)
 {
-	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu = NULL;
+	int cpuflags;
 	int rc;
 
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock(&fi->lock);
-	if (fi->local_int[cpu_addr] == NULL)
-		rc = SIGP_CC_NOT_OPERATIONAL;
-	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
+	li = &dst_vcpu->arch.local_int;
+
+	cpuflags = atomic_read(li->cpuflags);
+	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	else {
 		*reg &= 0xffffffff00000000UL;
-		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		    & CPUSTAT_ECALL_PEND)
+		if (cpuflags & CPUSTAT_ECALL_PEND)
 			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
-		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		    & CPUSTAT_STOPPED)
+		if (cpuflags & CPUSTAT_STOPPED)
 			*reg |= SIGP_STATUS_STOPPED;
 		rc = SIGP_CC_STATUS_STORED;
 	}
-	spin_unlock(&fi->lock);
 
 	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
 	return rc;
@@ -53,12 +54,13 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
53 54
54static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) 55static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
55{ 56{
56 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
57 struct kvm_s390_local_interrupt *li; 57 struct kvm_s390_local_interrupt *li;
58 struct kvm_s390_interrupt_info *inti; 58 struct kvm_s390_interrupt_info *inti;
59 int rc; 59 struct kvm_vcpu *dst_vcpu = NULL;
60 60
61 if (cpu_addr >= KVM_MAX_VCPUS) 61 if (cpu_addr < KVM_MAX_VCPUS)
62 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
63 if (!dst_vcpu)
62 return SIGP_CC_NOT_OPERATIONAL; 64 return SIGP_CC_NOT_OPERATIONAL;
63 65
64 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 66 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -68,13 +70,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
68 inti->type = KVM_S390_INT_EMERGENCY; 70 inti->type = KVM_S390_INT_EMERGENCY;
69 inti->emerg.code = vcpu->vcpu_id; 71 inti->emerg.code = vcpu->vcpu_id;
70 72
71 spin_lock(&fi->lock); 73 li = &dst_vcpu->arch.local_int;
72 li = fi->local_int[cpu_addr];
73 if (li == NULL) {
74 rc = SIGP_CC_NOT_OPERATIONAL;
75 kfree(inti);
76 goto unlock;
77 }
78 spin_lock_bh(&li->lock); 74 spin_lock_bh(&li->lock);
79 list_add_tail(&inti->list, &li->list); 75 list_add_tail(&inti->list, &li->list);
80 atomic_set(&li->active, 1); 76 atomic_set(&li->active, 1);
@@ -82,11 +78,9 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
82 if (waitqueue_active(li->wq)) 78 if (waitqueue_active(li->wq))
83 wake_up_interruptible(li->wq); 79 wake_up_interruptible(li->wq);
84 spin_unlock_bh(&li->lock); 80 spin_unlock_bh(&li->lock);
85 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
86 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); 81 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
87unlock: 82
88 spin_unlock(&fi->lock); 83 return SIGP_CC_ORDER_CODE_ACCEPTED;
89 return rc;
90} 84}
91 85
92static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, 86static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
@@ -122,12 +116,13 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
122 116
123static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) 117static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
124{ 118{
125 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
126 struct kvm_s390_local_interrupt *li; 119 struct kvm_s390_local_interrupt *li;
127 struct kvm_s390_interrupt_info *inti; 120 struct kvm_s390_interrupt_info *inti;
128 int rc; 121 struct kvm_vcpu *dst_vcpu = NULL;
129 122
130 if (cpu_addr >= KVM_MAX_VCPUS) 123 if (cpu_addr < KVM_MAX_VCPUS)
124 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
125 if (!dst_vcpu)
131 return SIGP_CC_NOT_OPERATIONAL; 126 return SIGP_CC_NOT_OPERATIONAL;
132 127
133 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 128 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -137,13 +132,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
137 inti->type = KVM_S390_INT_EXTERNAL_CALL; 132 inti->type = KVM_S390_INT_EXTERNAL_CALL;
138 inti->extcall.code = vcpu->vcpu_id; 133 inti->extcall.code = vcpu->vcpu_id;
139 134
140 spin_lock(&fi->lock); 135 li = &dst_vcpu->arch.local_int;
141 li = fi->local_int[cpu_addr];
142 if (li == NULL) {
143 rc = SIGP_CC_NOT_OPERATIONAL;
144 kfree(inti);
145 goto unlock;
146 }
147 spin_lock_bh(&li->lock); 136 spin_lock_bh(&li->lock);
148 list_add_tail(&inti->list, &li->list); 137 list_add_tail(&inti->list, &li->list);
149 atomic_set(&li->active, 1); 138 atomic_set(&li->active, 1);
@@ -151,11 +140,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
151 if (waitqueue_active(li->wq)) 140 if (waitqueue_active(li->wq))
152 wake_up_interruptible(li->wq); 141 wake_up_interruptible(li->wq);
153 spin_unlock_bh(&li->lock); 142 spin_unlock_bh(&li->lock);
154 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
155 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); 143 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
156unlock: 144
157 spin_unlock(&fi->lock); 145 return SIGP_CC_ORDER_CODE_ACCEPTED;
158 return rc;
159} 146}
160 147
161static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) 148static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
@@ -189,31 +176,26 @@ out:
189 176
190static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) 177static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
191{ 178{
192 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
193 struct kvm_s390_local_interrupt *li; 179 struct kvm_s390_local_interrupt *li;
180 struct kvm_vcpu *dst_vcpu = NULL;
194 int rc; 181 int rc;
195 182
196 if (cpu_addr >= KVM_MAX_VCPUS) 183 if (cpu_addr >= KVM_MAX_VCPUS)
197 return SIGP_CC_NOT_OPERATIONAL; 184 return SIGP_CC_NOT_OPERATIONAL;
198 185
199 spin_lock(&fi->lock); 186 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
200 li = fi->local_int[cpu_addr]; 187 if (!dst_vcpu)
201 if (li == NULL) { 188 return SIGP_CC_NOT_OPERATIONAL;
202 rc = SIGP_CC_NOT_OPERATIONAL; 189 li = &dst_vcpu->arch.local_int;
203 goto unlock;
204 }
205 190
206 rc = __inject_sigp_stop(li, action); 191 rc = __inject_sigp_stop(li, action);
207 192
208unlock:
209 spin_unlock(&fi->lock);
210 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); 193 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
211 194
212 if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) { 195 if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
213 /* If the CPU has already been stopped, we still have 196 /* If the CPU has already been stopped, we still have
214 * to save the status when doing stop-and-store. This 197 * to save the status when doing stop-and-store. This
215 * has to be done after unlocking all spinlocks. */ 198 * has to be done after unlocking all spinlocks. */
216 struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
217 rc = kvm_s390_store_status_unloaded(dst_vcpu, 199 rc = kvm_s390_store_status_unloaded(dst_vcpu,
218 KVM_S390_STORE_STATUS_NOADDR); 200 KVM_S390_STORE_STATUS_NOADDR);
219 } 201 }
@@ -224,6 +206,8 @@ unlock:
224static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) 206static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
225{ 207{
226 int rc; 208 int rc;
209 unsigned int i;
210 struct kvm_vcpu *v;
227 211
228 switch (parameter & 0xff) { 212 switch (parameter & 0xff) {
229 case 0: 213 case 0:
@@ -231,6 +215,11 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
231 break; 215 break;
232 case 1: 216 case 1:
233 case 2: 217 case 2:
218 kvm_for_each_vcpu(i, v, vcpu->kvm) {
219 v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
220 kvm_clear_async_pf_completion_queue(v);
221 }
222
234 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 223 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
235 break; 224 break;
236 default: 225 default:
@@ -242,12 +231,18 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
242static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, 231static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
243 u64 *reg) 232 u64 *reg)
244{ 233{
245 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 234 struct kvm_s390_local_interrupt *li;
246 struct kvm_s390_local_interrupt *li = NULL; 235 struct kvm_vcpu *dst_vcpu = NULL;
247 struct kvm_s390_interrupt_info *inti; 236 struct kvm_s390_interrupt_info *inti;
248 int rc; 237 int rc;
249 u8 tmp; 238 u8 tmp;
250 239
240 if (cpu_addr < KVM_MAX_VCPUS)
241 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
242 if (!dst_vcpu)
243 return SIGP_CC_NOT_OPERATIONAL;
244 li = &dst_vcpu->arch.local_int;
245
251 /* make sure that the new value is valid memory */ 246 /* make sure that the new value is valid memory */
252 address = address & 0x7fffe000u; 247 address = address & 0x7fffe000u;
253 if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || 248 if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
@@ -261,18 +256,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
261 if (!inti) 256 if (!inti)
262 return SIGP_CC_BUSY; 257 return SIGP_CC_BUSY;
263 258
264 spin_lock(&fi->lock);
265 if (cpu_addr < KVM_MAX_VCPUS)
266 li = fi->local_int[cpu_addr];
267
268 if (li == NULL) {
269 *reg &= 0xffffffff00000000UL;
270 *reg |= SIGP_STATUS_INCORRECT_STATE;
271 rc = SIGP_CC_STATUS_STORED;
272 kfree(inti);
273 goto out_fi;
274 }
275
276 spin_lock_bh(&li->lock); 259 spin_lock_bh(&li->lock);
277 /* cpu must be in stopped state */ 260 /* cpu must be in stopped state */
278 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { 261 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
@@ -295,8 +278,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
295 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); 278 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
296out_li: 279out_li:
297 spin_unlock_bh(&li->lock); 280 spin_unlock_bh(&li->lock);
298out_fi:
299 spin_unlock(&fi->lock);
300 return rc; 281 return rc;
301} 282}
302 283
@@ -334,28 +315,26 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
334static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, 315static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
335 u64 *reg) 316 u64 *reg)
336{ 317{
318 struct kvm_s390_local_interrupt *li;
319 struct kvm_vcpu *dst_vcpu = NULL;
337 int rc; 320 int rc;
338 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
339 321
340 if (cpu_addr >= KVM_MAX_VCPUS) 322 if (cpu_addr >= KVM_MAX_VCPUS)
341 return SIGP_CC_NOT_OPERATIONAL; 323 return SIGP_CC_NOT_OPERATIONAL;
342 324
343 spin_lock(&fi->lock); 325 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
344 if (fi->local_int[cpu_addr] == NULL) 326 if (!dst_vcpu)
345 rc = SIGP_CC_NOT_OPERATIONAL; 327 return SIGP_CC_NOT_OPERATIONAL;
346 else { 328 li = &dst_vcpu->arch.local_int;
347 if (atomic_read(fi->local_int[cpu_addr]->cpuflags) 329 if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
348 & CPUSTAT_RUNNING) { 330 /* running */
349 /* running */ 331 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
350 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 332 } else {
351 } else { 333 /* not running */
352 /* not running */ 334 *reg &= 0xffffffff00000000UL;
353 *reg &= 0xffffffff00000000UL; 335 *reg |= SIGP_STATUS_NOT_RUNNING;
354 *reg |= SIGP_STATUS_NOT_RUNNING; 336 rc = SIGP_CC_STATUS_STORED;
355 rc = SIGP_CC_STATUS_STORED;
356 }
357 } 337 }
358 spin_unlock(&fi->lock);
359 338
360 VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr, 339 VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
361 rc); 340 rc);
@@ -366,26 +345,22 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
366/* Test whether the destination CPU is available and not busy */ 345/* Test whether the destination CPU is available and not busy */
367static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) 346static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
368{ 347{
369 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
370 struct kvm_s390_local_interrupt *li; 348 struct kvm_s390_local_interrupt *li;
371 int rc = SIGP_CC_ORDER_CODE_ACCEPTED; 349 int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
350 struct kvm_vcpu *dst_vcpu = NULL;
372 351
373 if (cpu_addr >= KVM_MAX_VCPUS) 352 if (cpu_addr >= KVM_MAX_VCPUS)
374 return SIGP_CC_NOT_OPERATIONAL; 353 return SIGP_CC_NOT_OPERATIONAL;
375 354
376 spin_lock(&fi->lock); 355 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
377 li = fi->local_int[cpu_addr]; 356 if (!dst_vcpu)
378 if (li == NULL) { 357 return SIGP_CC_NOT_OPERATIONAL;
379 rc = SIGP_CC_NOT_OPERATIONAL; 358 li = &dst_vcpu->arch.local_int;
380 goto out;
381 }
382
383 spin_lock_bh(&li->lock); 359 spin_lock_bh(&li->lock);
384 if (li->action_bits & ACTION_STOP_ON_STOP) 360 if (li->action_bits & ACTION_STOP_ON_STOP)
385 rc = SIGP_CC_BUSY; 361 rc = SIGP_CC_BUSY;
386 spin_unlock_bh(&li->lock); 362 spin_unlock_bh(&li->lock);
387out: 363
388 spin_unlock(&fi->lock);
389 return rc; 364 return rc;
390} 365}
391 366
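Every helper in this file now resolves the destination CPU the same way: look the vcpu up with kvm_get_vcpu() and use its embedded arch.local_int instead of dereferencing fi->local_int[cpu_addr] under the floating-interrupt lock, which also removes all the unlock/goto tails. The recurring pattern, factored into a stand-alone sketch (the helper name is illustrative, not part of the patch):

        /* Illustrative helper, not from the patch: resolve a SIGP cpu
         * address to its local-interrupt block without fi->lock. */
        static struct kvm_s390_local_interrupt *sigp_dest(struct kvm *kvm,
                                                          u16 cpu_addr)
        {
                struct kvm_vcpu *dst_vcpu;

                if (cpu_addr >= KVM_MAX_VCPUS)
                        return NULL;  /* caller: SIGP_CC_NOT_OPERATIONAL */
                dst_vcpu = kvm_get_vcpu(kvm, cpu_addr);
                if (!dst_vcpu)
                        return NULL;  /* caller: SIGP_CC_NOT_OPERATIONAL */
                return &dst_vcpu->arch.local_int;  /* embedded, always valid */
        }

The lookup is safe without fi->lock because a vcpu, once created, is not freed until the VM itself is torn down, so the per-vcpu local_int block cannot vanish the way a fi->local_int[] slot could.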
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index 3db76b2daed7..e8e7213d4cc5 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -30,6 +30,52 @@
         TP_printk("%02d[%016lx-%016lx]: " p_str, __entry->id, \
                   __entry->pswmask, __entry->pswaddr, p_args)
 
+TRACE_EVENT(kvm_s390_major_guest_pfault,
+            TP_PROTO(VCPU_PROTO_COMMON),
+            TP_ARGS(VCPU_ARGS_COMMON),
+
+            TP_STRUCT__entry(
+                    VCPU_FIELD_COMMON
+                    ),
+
+            TP_fast_assign(
+                    VCPU_ASSIGN_COMMON
+                    ),
+            VCPU_TP_PRINTK("%s", "major fault, maybe applicable for pfault")
+        );
+
+TRACE_EVENT(kvm_s390_pfault_init,
+            TP_PROTO(VCPU_PROTO_COMMON, long pfault_token),
+            TP_ARGS(VCPU_ARGS_COMMON, pfault_token),
+
+            TP_STRUCT__entry(
+                    VCPU_FIELD_COMMON
+                    __field(long, pfault_token)
+                    ),
+
+            TP_fast_assign(
+                    VCPU_ASSIGN_COMMON
+                    __entry->pfault_token = pfault_token;
+                    ),
+            VCPU_TP_PRINTK("init pfault token %ld", __entry->pfault_token)
+        );
+
+TRACE_EVENT(kvm_s390_pfault_done,
+            TP_PROTO(VCPU_PROTO_COMMON, long pfault_token),
+            TP_ARGS(VCPU_ARGS_COMMON, pfault_token),
+
+            TP_STRUCT__entry(
+                    VCPU_FIELD_COMMON
+                    __field(long, pfault_token)
+                    ),
+
+            TP_fast_assign(
+                    VCPU_ASSIGN_COMMON
+                    __entry->pfault_token = pfault_token;
+                    ),
+            VCPU_TP_PRINTK("done pfault token %ld", __entry->pfault_token)
+        );
+
 /*
  * Tracepoints for SIE entry and exit.
  */
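Each TRACE_EVENT() above expands into a trace_<name>() function for the async pfault paths added elsewhere in this series to call. A sketch of the expected call sites; the work->arch.pfault_token accessor is an assumption about the async-pf plumbing, and only the tracepoint names come from this hunk:

        /* Sketch: firing the new tracepoints from the pfault paths. */
        trace_kvm_s390_major_guest_pfault(vcpu);
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);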