 Documentation/virtual/kvm/api.txt        |  22
 Documentation/virtual/kvm/devices/vm.txt |  14
 arch/arm/kvm/arm.c                       |   3
 arch/mips/kvm/mips.c                     |   3
 arch/powerpc/kvm/powerpc.c               |   3
 arch/s390/include/asm/kvm_host.h         |  28
 arch/s390/include/asm/sclp.h             |   4
 arch/s390/include/uapi/asm/kvm.h         |  16
 arch/s390/kvm/intercept.c                |  41
 arch/s390/kvm/interrupt.c                | 189
 arch/s390/kvm/kvm-s390.c                 | 324
 arch/s390/kvm/kvm-s390.h                 |   6
 arch/s390/kvm/sigp.c                     | 160
 arch/s390/kvm/trace-s390.h               |  14
 arch/x86/kvm/x86.c                       |  10
 drivers/s390/char/sclp_early.c           |   8
 include/linux/kvm_host.h                 |   2
 include/uapi/linux/kvm.h                 |   7
 18 files changed, 656 insertions(+), 198 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index f4b19d78782b..b112efc816f1 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2315,7 +2315,7 @@ struct kvm_s390_interrupt {
 
 type can be one of the following:
 
-KVM_S390_SIGP_STOP (vcpu) - sigp restart
+KVM_S390_SIGP_STOP (vcpu) - sigp stop; optional flags in parm
 KVM_S390_PROGRAM_INT (vcpu) - program check; code in parm
 KVM_S390_SIGP_SET_PREFIX (vcpu) - sigp set prefix; prefix address in parm
 KVM_S390_RESTART (vcpu) - restart
@@ -3228,3 +3228,23 @@ userspace from doing that.
 If the hcall number specified is not one that has an in-kernel
 implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
 error.
+
+7.2 KVM_CAP_S390_USER_SIGP
+
+Architectures: s390
+Parameters: none
+
+This capability controls which SIGP orders will be handled completely in user
+space. With this capability enabled, all fast orders will be handled completely
+in the kernel:
+- SENSE
+- SENSE RUNNING
+- EXTERNAL CALL
+- EMERGENCY SIGNAL
+- CONDITIONAL EMERGENCY SIGNAL
+
+All other orders will be handled completely in user space.
+
+Only privileged operation exceptions will be checked for in the kernel (or even
+in the hardware prior to interception). If this capability is not enabled, the
+old way of handling SIGP orders is used (partially in kernel and user space).
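
A minimal userspace sketch of how this capability would be enabled (not part
of the patch; vm_fd is an assumed, already-open VM file descriptor):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int enable_user_sigp(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_S390_USER_SIGP,
		};

		/* from now on, only the fast SIGP orders listed above
		 * are handled in the kernel */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}
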
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index d426fc87fe93..c3b17c61b7dd 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -24,3 +24,17 @@ Returns: 0
 
 Clear the CMMA status for all guest pages, so any pages the guest marked
 as unused are again used and may not be reclaimed by the host.
+
+1.3. ATTRIBUTE KVM_S390_VM_MEM_LIMIT_SIZE
+Parameters: in attr->addr the address for the new limit of guest memory
+Returns: -EFAULT if the given address is not accessible
+         -EINVAL if the virtual machine is of type UCONTROL
+         -E2BIG if the given guest memory is too big for that machine
+         -EBUSY if a vcpu is already defined
+         -ENOMEM if not enough memory is available for a new shadow guest mapping
+          0 otherwise
+
+Allows userspace to query the actual limit and set a new limit for
+the maximum guest memory size. The limit will be rounded up to
+2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
+the number of page table levels.
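
A minimal userspace sketch of setting the limit through the VM device
attribute interface (not part of the patch; vm_fd is an assumed VM file
descriptor, and the call must happen before any vcpu is created or it
fails with -EBUSY):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int set_guest_mem_limit(int vm_fd, __u64 new_limit)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_MEM_CTRL,
			.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
			/* the kernel reads the new limit from this address */
			.addr  = (__u64)&new_limit,
		};

		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
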
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 6fbfa5fff05d..b6358cab3ebf 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -249,9 +249,8 @@ out:
 	return ERR_PTR(err);
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index e3b21e51ff7e..7082481cd108 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -832,9 +832,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -ENOIOCTLCMD;
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index c45eaab752b0..27c0face86f4 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -623,9 +623,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	return vcpu;
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 9cba74d5d853..d1ecc7fd0579 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -35,11 +35,13 @@
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 4096
 
-#define SIGP_CTRL_C 0x00800000
+#define SIGP_CTRL_C 0x80
+#define SIGP_CTRL_SCN_MASK 0x3f
 
 struct sca_entry {
-	atomic_t ctrl;
-	__u32 reserved;
+	__u8 reserved0;
+	__u8 sigp_ctrl;
+	__u16 reserved[3];
 	__u64 sda;
 	__u64 reserved2[2];
 } __attribute__((packed));
@@ -132,7 +134,9 @@ struct kvm_s390_sie_block {
 	__u8 reserved60; /* 0x0060 */
 	__u8 ecb; /* 0x0061 */
 	__u8 ecb2; /* 0x0062 */
-	__u8 reserved63[1]; /* 0x0063 */
+#define ECB3_AES 0x04
+#define ECB3_DEA 0x08
+	__u8 ecb3; /* 0x0063 */
 	__u32 scaol; /* 0x0064 */
 	__u8 reserved68[4]; /* 0x0068 */
 	__u32 todpr; /* 0x006c */
@@ -378,14 +382,11 @@ struct kvm_s390_interrupt_info {
 		struct kvm_s390_emerg_info emerg;
 		struct kvm_s390_extcall_info extcall;
 		struct kvm_s390_prefix_info prefix;
+		struct kvm_s390_stop_info stop;
 		struct kvm_s390_mchk_info mchk;
 	};
 };
 
-/* for local_interrupt.action_flags */
-#define ACTION_STORE_ON_STOP (1<<0)
-#define ACTION_STOP_ON_STOP (1<<1)
-
 struct kvm_s390_irq_payload {
 	struct kvm_s390_io_info io;
 	struct kvm_s390_ext_info ext;
@@ -393,6 +394,7 @@ struct kvm_s390_irq_payload {
 	struct kvm_s390_emerg_info emerg;
 	struct kvm_s390_extcall_info extcall;
 	struct kvm_s390_prefix_info prefix;
+	struct kvm_s390_stop_info stop;
 	struct kvm_s390_mchk_info mchk;
 };
 
@@ -401,7 +403,6 @@ struct kvm_s390_local_interrupt {
 	struct kvm_s390_float_interrupt *float_int;
 	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
-	unsigned int action_bits;
 	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
 	struct kvm_s390_irq_payload irq;
 	unsigned long pending_irqs;
@@ -470,7 +471,6 @@ struct kvm_vcpu_arch {
 	};
 	struct gmap *gmap;
 	struct kvm_guestdbg_info_arch guestdbg;
-#define KVM_S390_PFAULT_TOKEN_INVALID (-1UL)
 	unsigned long pfault_token;
 	unsigned long pfault_select;
 	unsigned long pfault_compare;
@@ -507,10 +507,14 @@ struct s390_io_adapter {
 struct kvm_s390_crypto {
 	struct kvm_s390_crypto_cb *crycb;
 	__u32 crycbd;
+	__u8 aes_kw;
+	__u8 dea_kw;
 };
 
 struct kvm_s390_crypto_cb {
-	__u8 reserved00[128]; /* 0x0000 */
+	__u8 reserved00[72]; /* 0x0000 */
+	__u8 dea_wrapping_key_mask[24]; /* 0x0048 */
+	__u8 aes_wrapping_key_mask[32]; /* 0x0060 */
 };
 
 struct kvm_arch{
@@ -523,12 +527,14 @@ struct kvm_arch{
 	int use_irqchip;
 	int use_cmma;
 	int user_cpu_state_ctrl;
+	int user_sigp;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
 	wait_queue_head_t ipte_wq;
 	int ipte_lock_count;
 	struct mutex ipte_mutex;
 	spinlock_t start_stop_lock;
 	struct kvm_s390_crypto crypto;
+	u64 epoch;
 };
 
 #define KVM_HVA_ERR_BAD (-1UL)
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 1aba89b53cb9..425e6cc240ff 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -31,7 +31,8 @@ struct sclp_cpu_entry {
 	u8 reserved0[2];
 	u8 : 3;
 	u8 siif : 1;
-	u8 : 4;
+	u8 sigpif : 1;
+	u8 : 3;
 	u8 reserved2[10];
 	u8 type;
 	u8 reserved1;
@@ -66,6 +67,7 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 unsigned long sclp_get_hsa_size(void);
 void sclp_early_detect(void);
 int sclp_has_siif(void);
+int sclp_has_sigpif(void);
 unsigned int sclp_get_ibc(void);
 
 #endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 48eda3ab4944..546fc3a302e5 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -57,10 +57,23 @@ struct kvm_s390_io_adapter_req {
 
 /* kvm attr_group on vm fd */
 #define KVM_S390_VM_MEM_CTRL 0
+#define KVM_S390_VM_TOD 1
+#define KVM_S390_VM_CRYPTO 2
 
 /* kvm attributes for mem_ctrl */
 #define KVM_S390_VM_MEM_ENABLE_CMMA 0
 #define KVM_S390_VM_MEM_CLR_CMMA 1
+#define KVM_S390_VM_MEM_LIMIT_SIZE 2
+
+/* kvm attributes for KVM_S390_VM_TOD */
+#define KVM_S390_VM_TOD_LOW 0
+#define KVM_S390_VM_TOD_HIGH 1
+
+/* kvm attributes for crypto */
+#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
+#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
+#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
+#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
 
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
@@ -107,6 +120,9 @@ struct kvm_guest_debug_arch {
 	struct kvm_hw_breakpoint __user *hw_bp;
 };
 
+/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */
+#define KVM_S390_PFAULT_TOKEN_INVALID 0xffffffffffffffffULL
+
 #define KVM_SYNC_PREFIX (1UL << 0)
 #define KVM_SYNC_GPRS (1UL << 1)
 #define KVM_SYNC_ACRS (1UL << 2)
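
A minimal userspace sketch of toggling one of the new crypto attributes
(not part of the patch; vm_fd is an assumed VM file descriptor). Setting
the ENABLE attribute makes the kernel generate a fresh random wrapping
key mask, as implemented in kvm-s390.c below:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int enable_aes_key_wrapping(int vm_fd)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_CRYPTO,
			.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
		};

		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
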
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 81c77ab8102e..bebd2157edd0 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -68,18 +68,27 @@ static int handle_noop(struct kvm_vcpu *vcpu)
 
 static int handle_stop(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc = 0;
-	unsigned int action_bits;
+	uint8_t flags, stop_pending;
 
 	vcpu->stat.exit_stop_request++;
-	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
 
-	action_bits = vcpu->arch.local_int.action_bits;
+	/* delay the stop if any non-stop irq is pending */
+	if (kvm_s390_vcpu_has_irq(vcpu, 1))
+		return 0;
+
+	/* avoid races with the injection/SIGP STOP code */
+	spin_lock(&li->lock);
+	flags = li->irq.stop.flags;
+	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
+	spin_unlock(&li->lock);
 
-	if (!(action_bits & ACTION_STOP_ON_STOP))
+	trace_kvm_s390_stop_request(stop_pending, flags);
+	if (!stop_pending)
 		return 0;
 
-	if (action_bits & ACTION_STORE_ON_STOP) {
+	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
 		rc = kvm_s390_vcpu_store_status(vcpu,
 						KVM_S390_STORE_STATUS_NOADDR);
 		if (rc)
@@ -279,11 +288,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 		irq.type = KVM_S390_INT_CPU_TIMER;
 		break;
 	case EXT_IRQ_EXTERNAL_CALL:
-		if (kvm_s390_si_ext_call_pending(vcpu))
-			return 0;
 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
 		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
-		break;
+		rc = kvm_s390_inject_vcpu(vcpu, &irq);
+		/* ignore if another external call is already pending */
+		if (rc == -EBUSY)
+			return 0;
+		return rc;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -307,17 +318,19 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
 	/* Make sure that the source is paged-in */
-	srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
-	if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
-		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
+				     &srcaddr, 0);
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
 	if (rc != 0)
 		return rc;
 
 	/* Make sure that the destination is paged-in */
-	dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
-	if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
-		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
+				     &dstaddr, 1);
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
 	if (rc != 0)
 		return rc;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f00f31e66cd8..c34e1d904ff6 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -19,6 +19,7 @@
 #include <linux/bitmap.h>
 #include <asm/asm-offsets.h>
 #include <asm/uaccess.h>
+#include <asm/sclp.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -159,6 +160,12 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
 	if (psw_mchk_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_MCHK_MASK;
 
+	/*
+	 * STOP irqs will never be actively delivered. They are triggered via
+	 * intercept requests and cleared when the stop intercept is performed.
+	 */
+	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
+
 	return active_mask;
 }
 
@@ -186,9 +193,6 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 				   LCTL_CR10 | LCTL_CR11);
 		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
 	}
-
-	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
-		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -216,11 +220,18 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->lctl |= LCTL_CR14;
 }
 
+static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
+{
+	if (kvm_s390_is_stop_irq_pending(vcpu))
+		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+}
+
 /* Set interception request for non-deliverable local interrupts */
 static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
 {
 	set_intercept_indicators_ext(vcpu);
 	set_intercept_indicators_mchk(vcpu);
+	set_intercept_indicators_stop(vcpu);
 }
 
 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
@@ -392,18 +403,6 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
 	return rc ? -EFAULT : 0;
 }
 
-static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
-{
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
-	vcpu->stat.deliver_stop_signal++;
-	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
-					 0, 0);
-
-	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
-	clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
-	return 0;
-}
-
 static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -705,7 +704,6 @@ static const deliver_irq_t deliver_irq_funcs[] = {
 	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
 	[IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
 	[IRQ_PEND_RESTART] = __deliver_restart,
-	[IRQ_PEND_SIGP_STOP] = __deliver_stop,
 	[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
 	[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
 };
@@ -738,21 +736,20 @@ static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
 	return rc;
 }
 
-/* Check whether SIGP interpretation facility has an external call pending */
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
+/* Check whether an external call is pending (deliverable or not) */
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
-	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
-	if (!psw_extint_disabled(vcpu) &&
-	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
-	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
-	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
-		return 1;
+	if (!sclp_has_sigpif())
+		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 
-	return 0;
+	return (sigp_ctrl & SIGP_CTRL_C) &&
+	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
 }
 
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
 {
 	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
 	struct kvm_s390_interrupt_info *inti;
@@ -773,7 +770,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	if (!rc && kvm_cpu_has_pending_timer(vcpu))
 		rc = 1;
 
-	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
+	/* external call pending and deliverable */
+	if (!rc && kvm_s390_ext_call_pending(vcpu) &&
+	    !psw_extint_disabled(vcpu) &&
+	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+		rc = 1;
+
+	if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
 		rc = 1;
 
 	return rc;
@@ -804,14 +807,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP; /* disabled wait */
 	}
 
-	__set_cpu_idle(vcpu);
 	if (!ckc_interrupts_enabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
+		__set_cpu_idle(vcpu);
 		goto no_timer;
 	}
 
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
+	/* underflow */
+	if (vcpu->arch.sie_block->ckc < now)
+		return 0;
+
+	__set_cpu_idle(vcpu);
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
@@ -820,7 +829,7 @@ no_timer:
 	__unset_cpu_idle(vcpu);
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
+	hrtimer_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
 
@@ -840,10 +849,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
+	u64 now, sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	kvm_s390_vcpu_wakeup(vcpu);
+	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
+	/*
+	 * If the monotonic clock runs faster than the tod clock we might be
+	 * woken up too early and have to go back to sleep to avoid deadlocks.
+	 */
+	if (vcpu->arch.sie_block->ckc > now &&
+	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
+		return HRTIMER_RESTART;
+	kvm_s390_vcpu_wakeup(vcpu);
 	return HRTIMER_NORESTART;
 }
 
@@ -859,8 +878,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 
 	/* clear pending external calls set by sigp interpretation facility */
 	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
-	atomic_clear_mask(SIGP_CTRL_C,
-			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
+	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
@@ -984,18 +1002,43 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	return 0;
 }
 
-int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
+{
+	unsigned char new_val, old_val;
+	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
+	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
+	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
+		/* another external call is pending */
+		return -EBUSY;
+	}
+	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	return 0;
+}
+
+static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
+	uint16_t src_id = irq->u.extcall.code;
 
 	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
-		   irq->u.extcall.code);
+		   src_id);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
-				   irq->u.extcall.code, 0, 2);
+				   src_id, 0, 2);
+
+	/* sending vcpu invalid */
+	if (src_id >= KVM_MAX_VCPUS ||
+	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
+		return -EINVAL;
 
+	if (sclp_has_sigpif())
+		return __inject_extcall_sigpif(vcpu, src_id);
+
+	if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+		return -EBUSY;
 	*extcall = irq->u.extcall;
-	set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1006,23 +1049,41 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
 
 	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
-		   prefix->address);
+		   irq->u.prefix.address);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
-				   prefix->address, 0, 2);
+				   irq->u.prefix.address, 0, 2);
+
+	if (!is_vcpu_stopped(vcpu))
+		return -EBUSY;
 
 	*prefix = irq->u.prefix;
 	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
 	return 0;
 }
 
+#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_stop_info *stop = &li->irq.stop;
+	int rc = 0;
 
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
 
-	li->action_bits |= ACTION_STOP_ON_STOP;
-	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
+		return -EINVAL;
+
+	if (is_vcpu_stopped(vcpu)) {
+		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
+			rc = kvm_s390_store_status_unloaded(vcpu,
+						KVM_S390_STORE_STATUS_NOADDR);
+		return rc;
+	}
+
+	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
+		return -EBUSY;
+	stop->flags = irq->u.stop.flags;
+	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
 	return 0;
 }
 
@@ -1042,14 +1103,13 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 				   struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_emerg_info *emerg = &li->irq.emerg;
 
 	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
 		   irq->u.emerg.code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
-				   emerg->code, 0, 2);
+				   irq->u.emerg.code, 0, 2);
 
-	set_bit(emerg->code, li->sigp_emerg_pending);
+	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
@@ -1061,9 +1121,9 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
 
 	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
-		   mchk->mcic);
+		   irq->u.mchk.mcic);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
-				   mchk->mcic, 2);
+				   irq->u.mchk.mcic, 2);
 
 	/*
 	 * Because repressible machine checks can be indicated along with
@@ -1121,7 +1181,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 
 	if ((!schid && !cr6) || (schid && cr6))
 		return NULL;
-	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
 	inti = NULL;
@@ -1149,7 +1208,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 	if (list_empty(&fi->list))
 		atomic_set(&fi->active, 0);
 	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
 	return inti;
 }
 
@@ -1162,7 +1220,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	int sigcpu;
 	int rc = 0;
 
-	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
 	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
@@ -1213,7 +1270,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
 	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
 	return rc;
 }
 
@@ -1221,6 +1277,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		       struct kvm_s390_interrupt *s390int)
 {
 	struct kvm_s390_interrupt_info *inti;
+	int rc;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
@@ -1239,7 +1296,6 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		inti->ext.ext_params = s390int->parm;
 		break;
 	case KVM_S390_INT_PFAULT_DONE:
-		inti->type = s390int->type;
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_MCHK:
@@ -1268,7 +1324,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
 				 2);
 
-	return __inject_vm(kvm, inti);
+	rc = __inject_vm(kvm, inti);
+	if (rc)
+		kfree(inti);
+	return rc;
 }
 
 void kvm_s390_reinject_io_int(struct kvm *kvm,
@@ -1290,13 +1349,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 	case KVM_S390_SIGP_SET_PREFIX:
 		irq->u.prefix.address = s390int->parm;
 		break;
+	case KVM_S390_SIGP_STOP:
+		irq->u.stop.flags = s390int->parm;
+		break;
 	case KVM_S390_INT_EXTERNAL_CALL:
-		if (irq->u.extcall.code & 0xffff0000)
+		if (s390int->parm & 0xffff0000)
 			return -EINVAL;
 		irq->u.extcall.code = s390int->parm;
 		break;
 	case KVM_S390_INT_EMERGENCY:
-		if (irq->u.emerg.code & 0xffff0000)
+		if (s390int->parm & 0xffff0000)
 			return -EINVAL;
 		irq->u.emerg.code = s390int->parm;
 		break;
@@ -1307,6 +1369,23 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 	return 0;
 }
 
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+}
+
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	spin_lock(&li->lock);
+	li->irq.stop.flags = 0;
+	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+	spin_unlock(&li->lock);
+}
+
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1363,7 +1442,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
 	struct kvm_s390_float_interrupt *fi;
 	struct kvm_s390_interrupt_info *n, *inti = NULL;
 
-	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
 	list_for_each_entry_safe(inti, n, &fi->list, list) {
@@ -1373,7 +1451,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
 	fi->irq_count = 0;
 	atomic_set(&fi->active, 0);
 	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
 }
 
 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
@@ -1413,7 +1490,6 @@ static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
 	int ret = 0;
 	int n = 0;
 
-	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
 
@@ -1432,7 +1508,6 @@ static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
 	}
 
 	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
 
 	return ret < 0 ? ret : n;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3e09801e3104..b2371c0fd1f8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -22,6 +22,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/module.h>
+#include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <asm/asm-offsets.h>
@@ -166,6 +167,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_S390_IRQCHIP:
 	case KVM_CAP_VM_ATTRIBUTES:
 	case KVM_CAP_MP_STATE:
+	case KVM_CAP_S390_USER_SIGP:
 		r = 1;
 		break;
 	case KVM_CAP_NR_VCPUS:
@@ -254,6 +256,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		kvm->arch.use_irqchip = 1;
 		r = 0;
 		break;
+	case KVM_CAP_S390_USER_SIGP:
+		kvm->arch.user_sigp = 1;
+		r = 0;
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -261,7 +267,24 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	return r;
 }
 
-static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	int ret;
+
+	switch (attr->attr) {
+	case KVM_S390_VM_MEM_LIMIT_SIZE:
+		ret = 0;
+		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+			ret = -EFAULT;
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+	return ret;
+}
+
+static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	int ret;
 	unsigned int idx;
@@ -283,6 +306,190 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 		mutex_unlock(&kvm->lock);
 		ret = 0;
 		break;
+	case KVM_S390_VM_MEM_LIMIT_SIZE: {
+		unsigned long new_limit;
+
+		if (kvm_is_ucontrol(kvm))
+			return -EINVAL;
+
+		if (get_user(new_limit, (u64 __user *)attr->addr))
+			return -EFAULT;
+
+		if (new_limit > kvm->arch.gmap->asce_end)
+			return -E2BIG;
+
+		ret = -EBUSY;
+		mutex_lock(&kvm->lock);
+		if (atomic_read(&kvm->online_vcpus) == 0) {
+			/* gmap_alloc will round the limit up */
+			struct gmap *new = gmap_alloc(current->mm, new_limit);
+
+			if (!new) {
+				ret = -ENOMEM;
+			} else {
+				gmap_free(kvm->arch.gmap);
+				new->private = kvm;
+				kvm->arch.gmap = new;
+				ret = 0;
+			}
+		}
+		mutex_unlock(&kvm->lock);
+		break;
+	}
+	default:
+		ret = -ENXIO;
+		break;
+	}
+	return ret;
+}
+
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
+
+static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	if (!test_vfacility(76))
+		return -EINVAL;
+
+	mutex_lock(&kvm->lock);
+	switch (attr->attr) {
+	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+		get_random_bytes(
+			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+		kvm->arch.crypto.aes_kw = 1;
+		break;
+	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+		get_random_bytes(
+			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+		kvm->arch.crypto.dea_kw = 1;
+		break;
+	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+		kvm->arch.crypto.aes_kw = 0;
+		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
+			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+		break;
+	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+		kvm->arch.crypto.dea_kw = 0;
+		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
+			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+		break;
+	default:
+		mutex_unlock(&kvm->lock);
+		return -ENXIO;
+	}
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvm_s390_vcpu_crypto_setup(vcpu);
+		exit_sie(vcpu);
+	}
+	mutex_unlock(&kvm->lock);
+	return 0;
+}
+
+static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	u8 gtod_high;
+
+	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
+			   sizeof(gtod_high)))
+		return -EFAULT;
+
+	if (gtod_high != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_vcpu *cur_vcpu;
+	unsigned int vcpu_idx;
+	u64 host_tod, gtod;
+	int r;
+
+	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+		return -EFAULT;
+
+	r = store_tod_clock(&host_tod);
+	if (r)
+		return r;
+
+	mutex_lock(&kvm->lock);
+	kvm->arch.epoch = gtod - host_tod;
+	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+		exit_sie(cur_vcpu);
+	}
+	mutex_unlock(&kvm->lock);
+	return 0;
+}
+
+static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	int ret;
+
+	if (attr->flags)
+		return -EINVAL;
+
+	switch (attr->attr) {
+	case KVM_S390_VM_TOD_HIGH:
+		ret = kvm_s390_set_tod_high(kvm, attr);
+		break;
+	case KVM_S390_VM_TOD_LOW:
+		ret = kvm_s390_set_tod_low(kvm, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+	return ret;
+}
+
+static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	u8 gtod_high = 0;
+
+	if (copy_to_user((void __user *)attr->addr, &gtod_high,
+			 sizeof(gtod_high)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	u64 host_tod, gtod;
+	int r;
+
+	r = store_tod_clock(&host_tod);
+	if (r)
+		return r;
+
+	gtod = host_tod + kvm->arch.epoch;
+	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	int ret;
+
+	if (attr->flags)
+		return -EINVAL;
+
+	switch (attr->attr) {
+	case KVM_S390_VM_TOD_HIGH:
+		ret = kvm_s390_get_tod_high(kvm, attr);
+		break;
+	case KVM_S390_VM_TOD_LOW:
+		ret = kvm_s390_get_tod_low(kvm, attr);
+		break;
 	default:
 		ret = -ENXIO;
 		break;
@@ -296,7 +503,13 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 
 	switch (attr->group) {
 	case KVM_S390_VM_MEM_CTRL:
-		ret = kvm_s390_mem_control(kvm, attr);
+		ret = kvm_s390_set_mem_control(kvm, attr);
+		break;
+	case KVM_S390_VM_TOD:
+		ret = kvm_s390_set_tod(kvm, attr);
+		break;
+	case KVM_S390_VM_CRYPTO:
+		ret = kvm_s390_vm_set_crypto(kvm, attr);
 		break;
 	default:
 		ret = -ENXIO;
@@ -308,7 +521,21 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	return -ENXIO;
+	int ret;
+
+	switch (attr->group) {
+	case KVM_S390_VM_MEM_CTRL:
+		ret = kvm_s390_get_mem_control(kvm, attr);
+		break;
+	case KVM_S390_VM_TOD:
+		ret = kvm_s390_get_tod(kvm, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
 }
 
 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -320,6 +547,31 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 		switch (attr->attr) {
 		case KVM_S390_VM_MEM_ENABLE_CMMA:
 		case KVM_S390_VM_MEM_CLR_CMMA:
+		case KVM_S390_VM_MEM_LIMIT_SIZE:
+			ret = 0;
+			break;
+		default:
+			ret = -ENXIO;
+			break;
+		}
+		break;
+	case KVM_S390_VM_TOD:
+		switch (attr->attr) {
+		case KVM_S390_VM_TOD_LOW:
+		case KVM_S390_VM_TOD_HIGH:
+			ret = 0;
+			break;
+		default:
+			ret = -ENXIO;
+			break;
+		}
+		break;
+	case KVM_S390_VM_CRYPTO:
+		switch (attr->attr) {
+		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
 			ret = 0;
 			break;
 		default:
@@ -414,6 +666,10 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
 				  CRYCB_FORMAT1;
 
+	/* Disable AES/DEA protected key functions by default */
+	kvm->arch.crypto.aes_kw = 0;
+	kvm->arch.crypto.dea_kw = 0;
+
 	return 0;
 }
 
@@ -477,6 +733,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	kvm->arch.css_support = 0;
 	kvm->arch.use_irqchip = 0;
+	kvm->arch.epoch = 0;
 
 	spin_lock_init(&kvm->arch.start_stop_lock);
 
@@ -546,25 +803,30 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 }
 
 /* Section: vcpu related */
+static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
+	if (!vcpu->arch.gmap)
+		return -ENOMEM;
+	vcpu->arch.gmap->private = vcpu->kvm;
+
+	return 0;
+}
+
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
 	kvm_clear_async_pf_completion_queue(vcpu);
-	if (kvm_is_ucontrol(vcpu->kvm)) {
-		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
-		if (!vcpu->arch.gmap)
-			return -ENOMEM;
-		vcpu->arch.gmap->private = vcpu->kvm;
-		return 0;
-	}
-
-	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
 				    KVM_SYNC_GPRS |
 				    KVM_SYNC_ACRS |
 				    KVM_SYNC_CRS |
 				    KVM_SYNC_ARCH0 |
 				    KVM_SYNC_PFAULT;
+
+	if (kvm_is_ucontrol(vcpu->kvm))
+		return __kvm_ucontrol_vcpu_init(vcpu);
+
 	return 0;
 }
 
@@ -615,9 +877,13 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	kvm_s390_clear_local_irqs(vcpu);
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	mutex_lock(&vcpu->kvm->lock);
+	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	mutex_unlock(&vcpu->kvm->lock);
+	if (!kvm_is_ucontrol(vcpu->kvm))
+		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
 }
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
@@ -625,6 +891,13 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 	if (!test_vfacility(76))
 		return;
 
+	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+
+	if (vcpu->kvm->arch.crypto.aes_kw)
+		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+	if (vcpu->kvm->arch.crypto.dea_kw)
+		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
+
 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
 }
 
@@ -658,9 +931,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->ecb |= 0x10;
 
 	vcpu->arch.sie_block->ecb2 = 8;
-	vcpu->arch.sie_block->eca = 0xD1002000U;
+	vcpu->arch.sie_block->eca = 0xC1002000U;
 	if (sclp_has_siif())
 		vcpu->arch.sie_block->eca |= 1;
+	if (sclp_has_sigpif())
+		vcpu->arch.sie_block->eca |= 0x10000000U;
 	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
 				      ICTL_TPROT;
@@ -670,7 +945,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		if (rc)
 			return rc;
 	}
-	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 	get_cpu_id(&vcpu->arch.cpu_id);
 	vcpu->arch.cpu_id.version = 0xff;
@@ -741,7 +1016,7 @@ out:
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return kvm_cpu_has_interrupt(vcpu);
+	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
 void s390_vcpu_block(struct kvm_vcpu *vcpu)
@@ -869,6 +1144,8 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_S390_PFTOKEN:
 		r = get_user(vcpu->arch.pfault_token,
 			     (u64 __user *)reg->addr);
+		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+			kvm_clear_async_pf_completion_queue(vcpu);
 		break;
 	case KVM_REG_S390_PFCOMPARE:
 		r = get_user(vcpu->arch.pfault_compare,
@@ -1176,7 +1453,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 		return 0;
 	if (psw_extint_disabled(vcpu))
 		return 0;
-	if (kvm_cpu_has_interrupt(vcpu))
+	if (kvm_s390_vcpu_has_irq(vcpu, 0))
 		return 0;
 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
 		return 0;
@@ -1341,6 +1618,8 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+			kvm_clear_async_pf_completion_queue(vcpu);
 	}
 	kvm_run->kvm_dirty_regs = 0;
 }
@@ -1559,15 +1838,10 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
-	/* Need to lock access to action_bits to avoid a SIGP race condition */
-	spin_lock(&vcpu->arch.local_int.lock);
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
-
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
-	vcpu->arch.local_int.action_bits &=
-		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
-	spin_unlock(&vcpu->arch.local_int.lock);
+	kvm_s390_clear_stop_irq(vcpu);
 
+	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
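
A minimal userspace sketch of reading and restoring the guest TOD clock
with the new KVM_S390_VM_TOD group, e.g. across migration (not part of
the patch; src_fd and dst_fd are assumed VM file descriptors):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int copy_guest_tod(int src_fd, int dst_fd)
	{
		__u64 tod;
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_TOD,
			.attr  = KVM_S390_VM_TOD_LOW,
			.addr  = (__u64)&tod,
		};

		if (ioctl(src_fd, KVM_GET_DEVICE_ATTR, &attr))
			return -1;
		return ioctl(dst_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
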
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a8f3d9b71c11..c22dce8a7536 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -228,11 +228,13 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 			struct kvm_s390_irq *s390irq);
 
 /* implemented in interrupt.c */
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
 int psw_extint_disabled(struct kvm_vcpu *vcpu);
 void kvm_s390_destroy_adapters(struct kvm *kvm);
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
 extern struct kvm_device_ops kvm_flic_ops;
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
 
 /* implemented in guestdbg.c */
 void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 6651f9f73973..23b1e86b2122 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -26,15 +26,17 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 	struct kvm_s390_local_interrupt *li;
 	int cpuflags;
 	int rc;
+	int ext_call_pending;
 
 	li = &dst_vcpu->arch.local_int;
 
 	cpuflags = atomic_read(li->cpuflags);
-	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
+	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	else {
 		*reg &= 0xffffffff00000000UL;
-		if (cpuflags & CPUSTAT_ECALL_PEND)
+		if (ext_call_pending)
 			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
 		if (cpuflags & CPUSTAT_STOPPED)
 			*reg |= SIGP_STATUS_STOPPED;
@@ -96,7 +98,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
96} 98}
97 99
98static int __sigp_external_call(struct kvm_vcpu *vcpu, 100static int __sigp_external_call(struct kvm_vcpu *vcpu,
99 struct kvm_vcpu *dst_vcpu) 101 struct kvm_vcpu *dst_vcpu, u64 *reg)
100{ 102{
101 struct kvm_s390_irq irq = { 103 struct kvm_s390_irq irq = {
102 .type = KVM_S390_INT_EXTERNAL_CALL, 104 .type = KVM_S390_INT_EXTERNAL_CALL,
@@ -105,45 +107,31 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu,
105 int rc; 107 int rc;
106 108
107 rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); 109 rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
108 if (!rc) 110 if (rc == -EBUSY) {
111 *reg &= 0xffffffff00000000UL;
112 *reg |= SIGP_STATUS_EXT_CALL_PENDING;
113 return SIGP_CC_STATUS_STORED;
114 } else if (rc == 0) {
109 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", 115 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
110 dst_vcpu->vcpu_id); 116 dst_vcpu->vcpu_id);
111
112 return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
113}
114
115static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
116{
117 struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
118 int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
119
120 spin_lock(&li->lock);
121 if (li->action_bits & ACTION_STOP_ON_STOP) {
122 /* another SIGP STOP is pending */
123 rc = SIGP_CC_BUSY;
124 goto out;
125 } 117 }
126 if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
127 if ((action & ACTION_STORE_ON_STOP) != 0)
128 rc = -ESHUTDOWN;
129 goto out;
130 }
131 set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
132 li->action_bits |= action;
133 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
134 kvm_s390_vcpu_wakeup(dst_vcpu);
135out:
136 spin_unlock(&li->lock);
137 118
138 return rc; 119 return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
139} 120}
140 121
141static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) 122static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
142{ 123{
124 struct kvm_s390_irq irq = {
125 .type = KVM_S390_SIGP_STOP,
126 };
143 int rc; 127 int rc;
144 128
145 rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP); 129 rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
146 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id); 130 if (rc == -EBUSY)
131 rc = SIGP_CC_BUSY;
132 else if (rc == 0)
133 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
134 dst_vcpu->vcpu_id);
147 135
148 return rc; 136 return rc;
149} 137}
@@ -151,20 +139,18 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
151static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu, 139static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
152 struct kvm_vcpu *dst_vcpu, u64 *reg) 140 struct kvm_vcpu *dst_vcpu, u64 *reg)
153{ 141{
142 struct kvm_s390_irq irq = {
143 .type = KVM_S390_SIGP_STOP,
144 .u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
145 };
154 int rc; 146 int rc;
155 147
156 rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP | 148 rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
157 ACTION_STORE_ON_STOP); 149 if (rc == -EBUSY)
158 VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x", 150 rc = SIGP_CC_BUSY;
159 dst_vcpu->vcpu_id); 151 else if (rc == 0)
160 152 VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
161 if (rc == -ESHUTDOWN) { 153 dst_vcpu->vcpu_id);
162 /* If the CPU has already been stopped, we still have
163 * to save the status when doing stop-and-store. This
164 * has to be done after unlocking all spinlocks. */
165 rc = kvm_s390_store_status_unloaded(dst_vcpu,
166 KVM_S390_STORE_STATUS_NOADDR);
167 }
168 154
169 return rc; 155 return rc;
170} 156}
@@ -197,41 +183,33 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
197static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, 183static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
198 u32 address, u64 *reg) 184 u32 address, u64 *reg)
199{ 185{
200 struct kvm_s390_local_interrupt *li; 186 struct kvm_s390_irq irq = {
187 .type = KVM_S390_SIGP_SET_PREFIX,
188 .u.prefix.address = address & 0x7fffe000u,
189 };
201 int rc; 190 int rc;
202 191
203 li = &dst_vcpu->arch.local_int;
204
205 /* 192 /*
206 * Make sure the new value is valid memory. We only need to check the 193 * Make sure the new value is valid memory. We only need to check the
207 * first page, since address is 8k aligned and memory pieces are always 194 * first page, since address is 8k aligned and memory pieces are always
208 * at least 1MB aligned and have at least a size of 1MB. 195 * at least 1MB aligned and have at least a size of 1MB.
209 */ 196 */
210 address &= 0x7fffe000u; 197 if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
211 if (kvm_is_error_gpa(vcpu->kvm, address)) {
212 *reg &= 0xffffffff00000000UL; 198 *reg &= 0xffffffff00000000UL;
213 *reg |= SIGP_STATUS_INVALID_PARAMETER; 199 *reg |= SIGP_STATUS_INVALID_PARAMETER;
214 return SIGP_CC_STATUS_STORED; 200 return SIGP_CC_STATUS_STORED;
215 } 201 }
216 202
217 spin_lock(&li->lock); 203 rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
218 /* cpu must be in stopped state */ 204 if (rc == -EBUSY) {
219 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
220 *reg &= 0xffffffff00000000UL; 205 *reg &= 0xffffffff00000000UL;
221 *reg |= SIGP_STATUS_INCORRECT_STATE; 206 *reg |= SIGP_STATUS_INCORRECT_STATE;
222 rc = SIGP_CC_STATUS_STORED; 207 return SIGP_CC_STATUS_STORED;
223 goto out_li; 208 } else if (rc == 0) {
209 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
210 dst_vcpu->vcpu_id, irq.u.prefix.address);
224 } 211 }
225 212
226 li->irq.prefix.address = address;
227 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
228 kvm_s390_vcpu_wakeup(dst_vcpu);
229 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
230
231 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
232 address);
233out_li:
234 spin_unlock(&li->lock);
235 return rc; 213 return rc;
236} 214}
237 215
@@ -242,9 +220,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
242 int flags; 220 int flags;
243 int rc; 221 int rc;
244 222
245 spin_lock(&dst_vcpu->arch.local_int.lock);
246 flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); 223 flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
247 spin_unlock(&dst_vcpu->arch.local_int.lock);
248 if (!(flags & CPUSTAT_STOPPED)) { 224 if (!(flags & CPUSTAT_STOPPED)) {
249 *reg &= 0xffffffff00000000UL; 225 *reg &= 0xffffffff00000000UL;
250 *reg |= SIGP_STATUS_INCORRECT_STATE; 226 *reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -291,8 +267,9 @@ static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
291 /* handle (RE)START in user space */ 267 /* handle (RE)START in user space */
292 int rc = -EOPNOTSUPP; 268 int rc = -EOPNOTSUPP;
293 269
270 /* make sure we don't race with STOP irq injection */
294 spin_lock(&li->lock); 271 spin_lock(&li->lock);
295 if (li->action_bits & ACTION_STOP_ON_STOP) 272 if (kvm_s390_is_stop_irq_pending(dst_vcpu))
296 rc = SIGP_CC_BUSY; 273 rc = SIGP_CC_BUSY;
297 spin_unlock(&li->lock); 274 spin_unlock(&li->lock);
298 275
@@ -333,7 +310,7 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
333 break; 310 break;
334 case SIGP_EXTERNAL_CALL: 311 case SIGP_EXTERNAL_CALL:
335 vcpu->stat.instruction_sigp_external_call++; 312 vcpu->stat.instruction_sigp_external_call++;
336 rc = __sigp_external_call(vcpu, dst_vcpu); 313 rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
337 break; 314 break;
338 case SIGP_EMERGENCY_SIGNAL: 315 case SIGP_EMERGENCY_SIGNAL:
339 vcpu->stat.instruction_sigp_emergency++; 316 vcpu->stat.instruction_sigp_emergency++;
@@ -394,6 +371,53 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
394 return rc; 371 return rc;
395} 372}
396 373
374static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
375{
376 if (!vcpu->kvm->arch.user_sigp)
377 return 0;
378
379 switch (order_code) {
380 case SIGP_SENSE:
381 case SIGP_EXTERNAL_CALL:
382 case SIGP_EMERGENCY_SIGNAL:
383 case SIGP_COND_EMERGENCY_SIGNAL:
384 case SIGP_SENSE_RUNNING:
385 return 0;
386 /* update counters as we're directly dropping to user space */
387 case SIGP_STOP:
388 vcpu->stat.instruction_sigp_stop++;
389 break;
390 case SIGP_STOP_AND_STORE_STATUS:
391 vcpu->stat.instruction_sigp_stop_store_status++;
392 break;
393 case SIGP_STORE_STATUS_AT_ADDRESS:
394 vcpu->stat.instruction_sigp_store_status++;
395 break;
396 case SIGP_SET_PREFIX:
397 vcpu->stat.instruction_sigp_prefix++;
398 break;
399 case SIGP_START:
400 vcpu->stat.instruction_sigp_start++;
401 break;
402 case SIGP_RESTART:
403 vcpu->stat.instruction_sigp_restart++;
404 break;
405 case SIGP_INITIAL_CPU_RESET:
406 vcpu->stat.instruction_sigp_init_cpu_reset++;
407 break;
408 case SIGP_CPU_RESET:
409 vcpu->stat.instruction_sigp_cpu_reset++;
410 break;
411 default:
412 vcpu->stat.instruction_sigp_unknown++;
413 }
414
415 VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
416 order_code);
417
418 return 1;
419}
420
397int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) 421int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
398{ 422{
399 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 423 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -408,6 +432,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
408 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 432 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
409 433
410 order_code = kvm_s390_get_base_disp_rs(vcpu); 434 order_code = kvm_s390_get_base_disp_rs(vcpu);
435 if (handle_sigp_order_in_user_space(vcpu, order_code))
436 return -EOPNOTSUPP;
411 437
412 if (r1 % 2) 438 if (r1 % 2)
413 parameter = vcpu->run->s.regs.gprs[r1]; 439 parameter = vcpu->run->s.regs.gprs[r1];
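With the user_sigp capability set, handle_sigp_order_in_user_space() makes kvm_s390_handle_sigp() return -EOPNOTSUPP for everything except the fast orders, so the intercept is passed up to user space unmodified. A minimal sketch of how a VMM might turn this on (vm_fd from KVM_CREATE_VM is assumed):

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_USER_SIGP,
	};

	/* VM-wide: from now on, slow SIGP orders exit to user space */
	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		perror("KVM_ENABLE_CAP");
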
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 647e9d6a4818..653a7ec09ef5 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -209,19 +209,21 @@ TRACE_EVENT(kvm_s390_request_resets,
209 * Trace point for a vcpu's stop requests. 209 * Trace point for a vcpu's stop requests.
210 */ 210 */
211TRACE_EVENT(kvm_s390_stop_request, 211TRACE_EVENT(kvm_s390_stop_request,
212 TP_PROTO(unsigned int action_bits), 212 TP_PROTO(unsigned char stop_irq, unsigned char flags),
213 TP_ARGS(action_bits), 213 TP_ARGS(stop_irq, flags),
214 214
215 TP_STRUCT__entry( 215 TP_STRUCT__entry(
216 __field(unsigned int, action_bits) 216 __field(unsigned char, stop_irq)
217 __field(unsigned char, flags)
217 ), 218 ),
218 219
219 TP_fast_assign( 220 TP_fast_assign(
220 __entry->action_bits = action_bits; 221 __entry->stop_irq = stop_irq;
222 __entry->flags = flags;
221 ), 223 ),
222 224
223 TP_printk("stop request, action_bits = %08x", 225 TP_printk("stop request, stop irq = %u, flags = %08x",
224 __entry->action_bits) 226 __entry->stop_irq, __entry->flags)
225 ); 227 );
226 228
227 229
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d2bbb2d86610..afa0815bd384 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7021,15 +7021,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
7021 return r; 7021 return r;
7022} 7022}
7023 7023
7024int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 7024void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
7025{ 7025{
7026 int r;
7027 struct msr_data msr; 7026 struct msr_data msr;
7028 struct kvm *kvm = vcpu->kvm; 7027 struct kvm *kvm = vcpu->kvm;
7029 7028
7030 r = vcpu_load(vcpu); 7029 if (vcpu_load(vcpu))
7031 if (r) 7030 return;
7032 return r;
7033 msr.data = 0x0; 7031 msr.data = 0x0;
7034 msr.index = MSR_IA32_TSC; 7032 msr.index = MSR_IA32_TSC;
7035 msr.host_initiated = true; 7033 msr.host_initiated = true;
@@ -7038,8 +7036,6 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
7038 7036
7039 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 7037 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
7040 KVMCLOCK_SYNC_PERIOD); 7038 KVMCLOCK_SYNC_PERIOD);
7041
7042 return r;
7043} 7039}
7044 7040
7045void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 7041void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 5bd6cb145a87..b8a5bf5f1f3d 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -49,6 +49,7 @@ static unsigned long sclp_hsa_size;
49static unsigned int sclp_max_cpu; 49static unsigned int sclp_max_cpu;
50static struct sclp_ipl_info sclp_ipl_info; 50static struct sclp_ipl_info sclp_ipl_info;
51static unsigned char sclp_siif; 51static unsigned char sclp_siif;
52static unsigned char sclp_sigpif;
52static u32 sclp_ibc; 53static u32 sclp_ibc;
53 54
54u64 sclp_facilities; 55u64 sclp_facilities;
@@ -131,6 +132,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
131 if (boot_cpu_address != cpue->address) 132 if (boot_cpu_address != cpue->address)
132 continue; 133 continue;
133 sclp_siif = cpue->siif; 134 sclp_siif = cpue->siif;
135 sclp_sigpif = cpue->sigpif;
134 break; 136 break;
135 } 137 }
136 138
@@ -172,6 +174,12 @@ int sclp_has_siif(void)
172} 174}
173EXPORT_SYMBOL(sclp_has_siif); 175EXPORT_SYMBOL(sclp_has_siif);
174 176
177int sclp_has_sigpif(void)
178{
179 return sclp_sigpif;
180}
181EXPORT_SYMBOL(sclp_has_sigpif);
182
175unsigned int sclp_get_ibc(void) 183unsigned int sclp_get_ibc(void)
176{ 184{
177 return sclp_ibc; 185 return sclp_ibc;
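sclp_has_sigpif() reports the new sigpif bit from the boot cpu's SCLP entry. The consumer lives in the kvm-s390.c part of this patch (not shown in this excerpt); presumably it enables hardware interpretation of the fast SIGP orders during vcpu setup, along these lines (the ECA bit value is an assumption here):

	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U; /* assumed SIGPIF bit */
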
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0ef2daa199d8..7d6719522f1f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -661,7 +661,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
661void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); 661void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
662struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); 662struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
663int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); 663int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
664int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); 664void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
665void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); 665void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
666 666
667int kvm_arch_hardware_enable(void); 667int kvm_arch_hardware_enable(void);
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b4e6f1e70f03..805570650062 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -491,6 +491,11 @@ struct kvm_s390_emerg_info {
491 __u16 code; 491 __u16 code;
492}; 492};
493 493
494#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
495struct kvm_s390_stop_info {
496 __u32 flags;
497};
498
494struct kvm_s390_mchk_info { 499struct kvm_s390_mchk_info {
495 __u64 cr14; 500 __u64 cr14;
496 __u64 mcic; 501 __u64 mcic;
@@ -509,6 +514,7 @@ struct kvm_s390_irq {
509 struct kvm_s390_emerg_info emerg; 514 struct kvm_s390_emerg_info emerg;
510 struct kvm_s390_extcall_info extcall; 515 struct kvm_s390_extcall_info extcall;
511 struct kvm_s390_prefix_info prefix; 516 struct kvm_s390_prefix_info prefix;
517 struct kvm_s390_stop_info stop;
512 struct kvm_s390_mchk_info mchk; 518 struct kvm_s390_mchk_info mchk;
513 char reserved[64]; 519 char reserved[64];
514 } u; 520 } u;
@@ -753,6 +759,7 @@ struct kvm_ppc_smmu_info {
753#define KVM_CAP_PPC_FIXUP_HCALL 103 759#define KVM_CAP_PPC_FIXUP_HCALL 103
754#define KVM_CAP_PPC_ENABLE_HCALL 104 760#define KVM_CAP_PPC_ENABLE_HCALL 104
755#define KVM_CAP_CHECK_EXTENSION_VM 105 761#define KVM_CAP_CHECK_EXTENSION_VM 105
762#define KVM_CAP_S390_USER_SIGP 106
756 763
757#ifdef KVM_CAP_IRQ_ROUTING 764#ifdef KVM_CAP_IRQ_ROUTING
758 765
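
The uapi additions above give user space a structured way to describe a stop request, including the store-status variant. A hedged sketch of building and injecting one (KVM_S390_IRQ is an assumed vcpu ioctl taking struct kvm_s390_irq; trees without it would go through the legacy interrupt ioctl and the s390int_to_s390irq() conversion declared in the kvm-s390.h hunk):

	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};

	/* vcpu_fd is assumed to come from KVM_CREATE_VCPU */
	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
		perror("KVM_S390_IRQ");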