author     Dominik Dingel <dingel@linux.vnet.ibm.com>       2013-10-07 11:11:48 -0400
committer  Christian Borntraeger <borntraeger@de.ibm.com>   2014-01-30 07:11:02 -0500
commit     3c038e6be0e299d4d3762d0a9a29f02de6e04991 (patch)
tree       06f0c3a8debccb32b2388d57990aba5f4e363ea3 /arch/s390/kvm
parent     9f2ceda49c6b8827c795731c204f6c2587886e2c (diff)
KVM: async_pf: Async page fault support on s390
This patch enables async page faults for s390 KVM guests. It provides the
userspace API to enable the feature and to disable it with a wait
(disable_wait), which enforces that the feature is off by waiting until all
outstanding faults are handled. It also includes the diagnose code, called
by the guest to enable async page faults. The async page faults use an
already existing guest interface for this purpose, as described in
"CP Programming Services (SC24-6084)".

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/Kconfig     |  2
-rw-r--r--  arch/s390/kvm/Makefile    |  2
-rw-r--r--  arch/s390/kvm/diag.c      | 84
-rw-r--r--  arch/s390/kvm/interrupt.c | 65
-rw-r--r--  arch/s390/kvm/kvm-s390.c  | 89
-rw-r--r--  arch/s390/kvm/kvm-s390.h  |  4
-rw-r--r--  arch/s390/kvm/sigp.c      |  7
-rw-r--r--  arch/s390/kvm/trace.h     | 46
8 files changed, 290 insertions, 9 deletions
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 70b46eacf8e1..c8bacbcd2e5b 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -23,6 +23,8 @@ config KVM
 	select ANON_INODES
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select HAVE_KVM_EVENTFD
+	select KVM_ASYNC_PF
+	select KVM_ASYNC_PF_SYNC
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 40b4c6470f88..a47d2c355f68 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -7,7 +7,7 @@
 # as published by the Free Software Foundation.
 
 KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 8216c0e0b2e2..bf9ed34c2bcd 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -17,6 +17,7 @@
 #include "kvm-s390.h"
 #include "trace.h"
 #include "trace-s390.h"
+#include "gaccess.h"
 
 static int diag_release_pages(struct kvm_vcpu *vcpu)
 {
@@ -46,6 +47,87 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
+{
+	struct prs_parm {
+		u16 code;
+		u16 subcode;
+		u16 parm_len;
+		u16 parm_version;
+		u64 token_addr;
+		u64 select_mask;
+		u64 compare_mask;
+		u64 zarch;
+	};
+	struct prs_parm parm;
+	int rc;
+	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
+	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
+	unsigned long hva_token = KVM_HVA_ERR_BAD;
+
+	if (vcpu->run->s.regs.gprs[rx] & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	if (copy_from_guest(vcpu, &parm, vcpu->run->s.regs.gprs[rx], sizeof(parm)))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	switch (parm.subcode) {
+	case 0: /* TOKEN */
+		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
+			/*
+			 * If the pagefault handshake is already activated,
+			 * the token must not be changed. We have to return
+			 * decimal 8 instead, as mandated in SC24-6084.
+			 */
+			vcpu->run->s.regs.gprs[ry] = 8;
+			return 0;
+		}
+
+		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
+		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
+			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+		hva_token = gfn_to_hva(vcpu->kvm, gpa_to_gfn(parm.token_addr));
+		if (kvm_is_error_hva(hva_token))
+			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+		vcpu->arch.pfault_token = parm.token_addr;
+		vcpu->arch.pfault_select = parm.select_mask;
+		vcpu->arch.pfault_compare = parm.compare_mask;
+		vcpu->run->s.regs.gprs[ry] = 0;
+		rc = 0;
+		break;
+	case 1: /*
+		 * CANCEL
+		 * Specification allows to let already pending tokens survive
+		 * the cancel, therefore to reduce code complexity, we assume
+		 * all outstanding tokens are already pending.
+		 */
+		if (parm.token_addr || parm.select_mask ||
+		    parm.compare_mask || parm.zarch)
+			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+		vcpu->run->s.regs.gprs[ry] = 0;
+		/*
+		 * If the pfault handling was not established or is already
+		 * canceled SC24-6084 requests to return decimal 4.
+		 */
+		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+			vcpu->run->s.regs.gprs[ry] = 4;
+		else
+			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+
+		rc = 0;
+		break;
+	default:
+		rc = -EOPNOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+
 static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
@@ -150,6 +232,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 		return __diag_time_slice_end(vcpu);
 	case 0x9c:
 		return __diag_time_slice_end_directed(vcpu);
+	case 0x258:
+		return __diag_page_ref_service(vcpu);
 	case 0x308:
 		return __diag_ipl_functions(vcpu);
 	case 0x500:
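[Editor's note] For orientation, here is a hedged guest-side sketch of the TOKEN
handshake (subcode 0) that __diag_page_ref_service() above validates. The block
layout mirrors struct prs_parm; the helper name, mask values, and inline
assembly are illustrative only (the in-tree guest implementation is
pfault_init() in arch/s390/mm/fault.c), and exception handling for a
hypervisor that rejects the diagnose is omitted:

/* Illustrative only: establish the pfault token via DIAGNOSE 0x258. */
#include <linux/types.h>

struct pfault_parm {			/* matches prs_parm above */
	u16 code;			/* must be 0x258 */
	u16 subcode;			/* 0 = TOKEN, 1 = CANCEL */
	u16 parm_len;			/* length in doublewords, >= 5 */
	u16 parm_version;		/* must be 2 */
	u64 token_addr;			/* doubleword-aligned token address */
	u64 select_mask;		/* PSW bits that gate delivery ... */
	u64 compare_mask;		/* ... and the value they must have */
	u64 zarch;			/* 0x8000000000000000 = z/Architecture */
} __attribute__((aligned(8)));

static int pfault_token_establish(u64 token_addr)
{
	struct pfault_parm parm = {
		.code		= 0x258,
		.subcode	= 0,			/* TOKEN */
		.parm_len	= 5,
		.parm_version	= 2,
		.token_addr	= token_addr,		/* 8-byte aligned */
		.select_mask	= 1ULL << 48,		/* example mask */
		.compare_mask	= 1ULL << 48,
		.zarch		= 0x8000000000000000ULL,
	};
	int rc;

	/* diag rx,ry,0x258: rx points at the block, rc comes back in ry */
	asm volatile("	diag	%1,%0,0x258\n"
		     : "=d" (rc)
		     : "a" (&parm), "m" (parm)
		     : "cc");
	return rc;	/* 0 = enabled, 8 = was already enabled (SC24-6084) */
}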
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 9c9192b5e339..1848080c3f34 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -31,7 +31,7 @@ static int is_ioint(u64 type)
 	return ((type & 0xfffe0000u) != 0xfffe0000u);
 }
 
-static int psw_extint_disabled(struct kvm_vcpu *vcpu)
+int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
 }
@@ -78,11 +78,8 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 			return 1;
 		return 0;
 	case KVM_S390_INT_SERVICE:
-		if (psw_extint_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
-			return 1;
-		return 0;
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
 		if (psw_extint_disabled(vcpu))
 			return 0;
@@ -150,6 +147,8 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 	case KVM_S390_INT_EXTERNAL_CALL:
 	case KVM_S390_INT_EMERGENCY:
 	case KVM_S390_INT_SERVICE:
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
 		if (psw_extint_disabled(vcpu))
 			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
@@ -223,6 +222,30 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		rc |= put_guest(vcpu, inti->ext.ext_params,
 				(u32 __user *)__LC_EXT_PARAMS);
 		break;
+	case KVM_S390_INT_PFAULT_INIT:
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
+						 inti->ext.ext_params2);
+		rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, 0x0600, (u16 __user *) __LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest(vcpu, inti->ext.ext_params2,
+				(u64 __user *) __LC_EXT_PARAMS2);
+		break;
+	case KVM_S390_INT_PFAULT_DONE:
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
+						 inti->ext.ext_params2);
+		rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, 0x0680, (u16 __user *) __LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest(vcpu, inti->ext.ext_params2,
+				(u64 __user *) __LC_EXT_PARAMS2);
+		break;
 	case KVM_S390_INT_VIRTIO:
 		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
@@ -357,7 +380,7 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
@@ -737,6 +760,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
 		inti->ext.ext_params = s390int->parm;
 		break;
+	case KVM_S390_INT_PFAULT_DONE:
+		inti->type = s390int->type;
+		inti->ext.ext_params2 = s390int->parm64;
+		break;
 	case KVM_S390_MCHK:
 		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
 			 s390int->parm64);
@@ -823,6 +850,10 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 		inti->type = s390int->type;
 		inti->mchk.mcic = s390int->parm64;
 		break;
+	case KVM_S390_INT_PFAULT_INIT:
+		inti->type = s390int->type;
+		inti->ext.ext_params2 = s390int->parm64;
+		break;
 	case KVM_S390_INT_VIRTIO:
 	case KVM_S390_INT_SERVICE:
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
@@ -877,6 +908,8 @@ static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
 
 	irq.type = inti->type;
 	switch (inti->type) {
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
 	case KVM_S390_INT_SERVICE:
 		irq.u.ext = inti->ext;
@@ -956,6 +989,8 @@ static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
 		return -EFAULT;
 
 	switch (inti->type) {
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
 	case KVM_S390_INT_SERVICE:
 		target = (void *) &inti->ext;
@@ -1019,6 +1054,8 @@ static int enqueue_floating_irq(struct kvm_device *dev,
 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 {
 	int r = 0;
+	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
 	switch (attr->group) {
 	case KVM_DEV_FLIC_ENQUEUE:
@@ -1028,6 +1065,20 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 		r = 0;
 		clear_floating_interrupts(dev->kvm);
 		break;
+	case KVM_DEV_FLIC_APF_ENABLE:
+		dev->kvm->arch.gmap->pfault_enabled = 1;
+		break;
+	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
+		dev->kvm->arch.gmap->pfault_enabled = 0;
+		/*
+		 * Make sure no async faults are in transition when
+		 * clearing the queues. So we don't need to worry
+		 * about late coming workers.
+		 */
+		synchronize_srcu(&dev->kvm->srcu);
+		kvm_for_each_vcpu(i, vcpu, dev->kvm)
+			kvm_clear_async_pf_completion_queue(vcpu);
+		break;
 	default:
 		r = -EINVAL;
 	}
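[Editor's note] Userspace reaches these attribute groups through the FLIC
device's attribute interface. A minimal sketch, assuming KVM_DEV_TYPE_FLIC
and the KVM_DEV_FLIC_APF_* constants from this series' uapi headers; the
helper names are made up for illustration:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Create the FLIC once per VM; returns its device fd (or -1 on error). */
static int flic_create(int vm_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	return cd.fd;
}

/*
 * Toggle async page faults. The attribute group alone selects the
 * operation, there is no payload; DISABLE_WAIT returns only after the
 * completion queues of all vcpus have been cleared.
 */
static int flic_apf_set(int flic_fd, int enable)
{
	struct kvm_device_attr attr = {
		.group = enable ? KVM_DEV_FLIC_APF_ENABLE
				: KVM_DEV_FLIC_APF_DISABLE_WAIT,
	};

	return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}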
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9eec794caa7f..d8e9f04977db 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -152,6 +152,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 #ifdef CONFIG_KVM_S390_UCONTROL
 	case KVM_CAP_S390_UCONTROL:
 #endif
+	case KVM_CAP_ASYNC_PF:
 	case KVM_CAP_SYNC_REGS:
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ENABLE_CAP:
@@ -273,6 +274,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
+	kvm_clear_async_pf_completion_queue(vcpu);
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
 		clear_bit(63 - vcpu->vcpu_id,
 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
@@ -322,6 +324,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 /* Section: vcpu related */
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	kvm_clear_async_pf_completion_queue(vcpu);
 	if (kvm_is_ucontrol(vcpu->kvm)) {
 		vcpu->arch.gmap = gmap_alloc(current->mm);
 		if (!vcpu->arch.gmap)
@@ -382,6 +386,8 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.guest_fpregs.fpc = 0;
 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
 	vcpu->arch.sie_block->gbea = 1;
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	kvm_clear_async_pf_completion_queue(vcpu);
 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 }
 
@@ -713,10 +719,89 @@ static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
 	return rc;
 }
 
+static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
+				      unsigned long token)
+{
+	struct kvm_s390_interrupt inti;
+	inti.parm64 = token;
+
+	if (start_token) {
+		inti.type = KVM_S390_INT_PFAULT_INIT;
+		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
+	} else {
+		inti.type = KVM_S390_INT_PFAULT_DONE;
+		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
+	}
+}
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work)
+{
+	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
+	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
+}
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work)
+{
+	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
+	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
+}
+
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work)
+{
+	/* s390 will always inject the page directly */
+}
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * s390 will always inject the page directly,
+	 * but we still want check_async_completion to cleanup
+	 */
+	return true;
+}
+
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+{
+	hva_t hva;
+	struct kvm_arch_async_pf arch;
+	int rc;
+
+	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+		return 0;
+	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
+	    vcpu->arch.pfault_compare)
+		return 0;
+	if (psw_extint_disabled(vcpu))
+		return 0;
+	if (kvm_cpu_has_interrupt(vcpu))
+		return 0;
+	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+		return 0;
+	if (!vcpu->arch.gmap->pfault_enabled)
+		return 0;
+
+	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
+	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
+		return 0;
+
+	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
+	return rc;
+}
+
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
 	int rc, cpuflags;
 
+	/*
+	 * On s390 notifications for arriving pages will be delivered directly
+	 * to the guest but the house keeping for completed pfaults is
+	 * handled outside the worker.
+	 */
+	kvm_check_async_pf_completion(vcpu);
+
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
 	if (need_resched())
@@ -758,8 +843,10 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 		rc = -EREMOTE;
 
 	} else if (current->thread.gmap_pfault) {
+		trace_kvm_s390_major_guest_pfault(vcpu);
 		current->thread.gmap_pfault = 0;
-		if (kvm_arch_fault_in_sync(vcpu) >= 0)
+		if (kvm_arch_setup_async_pf(vcpu) ||
+		    (kvm_arch_fault_in_sync(vcpu) >= 0))
 			rc = 0;
 	}
 
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index f9559b0bd620..ed4750a5bc3c 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -159,4 +159,8 @@ void exit_sie_sync(struct kvm_vcpu *vcpu);
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 
+/* implemented in interrupt.c */
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int psw_extint_disabled(struct kvm_vcpu *vcpu);
+
 #endif
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 87c2b3a3bd3e..fe9442d39f0e 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -224,6 +224,8 @@ unlock:
 static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 {
 	int rc;
+	unsigned int i;
+	struct kvm_vcpu *v;
 
 	switch (parameter & 0xff) {
 	case 0:
@@ -231,6 +233,11 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 		break;
 	case 1:
 	case 2:
+		kvm_for_each_vcpu(i, v, vcpu->kvm) {
+			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+			kvm_clear_async_pf_completion_queue(v);
+		}
+
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 		break;
 	default:
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index 3db76b2daed7..e8e7213d4cc5 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -30,6 +30,52 @@
 	TP_printk("%02d[%016lx-%016lx]: " p_str, __entry->id, \
 		  __entry->pswmask, __entry->pswaddr, p_args)
 
+TRACE_EVENT(kvm_s390_major_guest_pfault,
+	    TP_PROTO(VCPU_PROTO_COMMON),
+	    TP_ARGS(VCPU_ARGS_COMMON),
+
+	    TP_STRUCT__entry(
+		    VCPU_FIELD_COMMON
+		    ),
+
+	    TP_fast_assign(
+		    VCPU_ASSIGN_COMMON
+		    ),
+	    VCPU_TP_PRINTK("%s", "major fault, maybe applicable for pfault")
+	);
+
+TRACE_EVENT(kvm_s390_pfault_init,
+	    TP_PROTO(VCPU_PROTO_COMMON, long pfault_token),
+	    TP_ARGS(VCPU_ARGS_COMMON, pfault_token),
+
+	    TP_STRUCT__entry(
+		    VCPU_FIELD_COMMON
+		    __field(long, pfault_token)
+		    ),
+
+	    TP_fast_assign(
+		    VCPU_ASSIGN_COMMON
+		    __entry->pfault_token = pfault_token;
+		    ),
+	    VCPU_TP_PRINTK("init pfault token %ld", __entry->pfault_token)
+	);
+
+TRACE_EVENT(kvm_s390_pfault_done,
+	    TP_PROTO(VCPU_PROTO_COMMON, long pfault_token),
+	    TP_ARGS(VCPU_ARGS_COMMON, pfault_token),
+
+	    TP_STRUCT__entry(
+		    VCPU_FIELD_COMMON
+		    __field(long, pfault_token)
+		    ),
+
+	    TP_fast_assign(
+		    VCPU_ASSIGN_COMMON
+		    __entry->pfault_token = pfault_token;
+		    ),
+	    VCPU_TP_PRINTK("done pfault token %ld", __entry->pfault_token)
+	);
+
 /*
  * Tracepoints for SIE entry and exit.
  */