aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaolo Bonzini <pbonzini@redhat.com>2014-05-16 17:02:40 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2014-05-16 17:02:40 -0400
commitafa538f0a171f479f6b3a9718a8608ef471ebd77 (patch)
treeb5cbcce14afbe2da194e4b3cb8f1594cea800fb5
parentd9f89b88f5102ce235b75a5907838e3c7ed84b97 (diff)
parentfda902cb8347da121025c4079b9e87748228a27e (diff)
Merge tag 'kvm-s390-20140516' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next
1. Correct locking for lazy storage key handling A test loop with multiple CPUs triggered a race in the lazy storage key handling as introduced by commit 934bc131efc3e4be6a52f7dd6c4dbf (KVM: s390: Allow skeys to be enabled for the current process). This race should not happen with Linux guests, but let's fix it anyway. Patch touches !/kvm/ code, but is from the s390 maintainer. 2. Better handling of broken guests If we detect a program check loop we stop the guest instead of wasting CPU cycles. 3. Better handling of MVPG emulation The move page handling is improved to be architecturally correct. 4. Trace point rework Let's rework the kvm trace points to have a common header file (for later perf usage) and provide a table based instruction decoder. 5. Interpretive execution of SIGP external call Let the hardware handle most cases of SIGP external call (IPI) and wire up the fixup code for the corner cases. 6. Initial preparations for the IBC facility Prepare the code to handle instruction blocking.
-rw-r--r--arch/s390/include/asm/kvm_host.h8
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/uapi/asm/sie.h245
-rw-r--r--arch/s390/kvm/diag.c2
-rw-r--r--arch/s390/kvm/gaccess.h8
-rw-r--r--arch/s390/kvm/intercept.c39
-rw-r--r--arch/s390/kvm/interrupt.c30
-rw-r--r--arch/s390/kvm/kvm-s390.c44
-rw-r--r--arch/s390/kvm/kvm-s390.h12
-rw-r--r--arch/s390/kvm/priv.c8
-rw-r--r--arch/s390/kvm/sigp.c35
-rw-r--r--arch/s390/kvm/trace.h60
-rw-r--r--arch/s390/mm/pgtable.c22
-rw-r--r--drivers/s390/char/sclp_early.c12
14 files changed, 431 insertions, 95 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 96b8a67ddaf8..a27f5007062a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -32,8 +32,10 @@
32#define KVM_NR_IRQCHIPS 1 32#define KVM_NR_IRQCHIPS 1
33#define KVM_IRQCHIP_NUM_PINS 4096 33#define KVM_IRQCHIP_NUM_PINS 4096
34 34
35#define SIGP_CTRL_C 0x00800000
36
35struct sca_entry { 37struct sca_entry {
36 atomic_t scn; 38 atomic_t ctrl;
37 __u32 reserved; 39 __u32 reserved;
38 __u64 sda; 40 __u64 sda;
39 __u64 reserved2[2]; 41 __u64 reserved2[2];
@@ -80,7 +82,9 @@ struct sca_block {
80 82
81struct kvm_s390_sie_block { 83struct kvm_s390_sie_block {
82 atomic_t cpuflags; /* 0x0000 */ 84 atomic_t cpuflags; /* 0x0000 */
83 __u32 prefix; /* 0x0004 */ 85 __u32 : 1; /* 0x0004 */
86 __u32 prefix : 18;
87 __u32 : 13;
84 __u8 reserved08[4]; /* 0x0008 */ 88 __u8 reserved08[4]; /* 0x0008 */
85#define PROG_IN_SIE (1<<0) 89#define PROG_IN_SIE (1<<0)
86 __u32 prog0c; /* 0x000c */ 90 __u32 prog0c; /* 0x000c */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 943d43451116..1aba89b53cb9 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -66,5 +66,6 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
66unsigned long sclp_get_hsa_size(void); 66unsigned long sclp_get_hsa_size(void);
67void sclp_early_detect(void); 67void sclp_early_detect(void);
68int sclp_has_siif(void); 68int sclp_has_siif(void);
69unsigned int sclp_get_ibc(void);
69 70
70#endif /* _ASM_S390_SCLP_H */ 71#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
new file mode 100644
index 000000000000..3d97f610198d
--- /dev/null
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -0,0 +1,245 @@
1#ifndef _UAPI_ASM_S390_SIE_H
2#define _UAPI_ASM_S390_SIE_H
3
4#include <asm/sigp.h>
5
6#define diagnose_codes \
7 { 0x10, "DIAG (0x10) release pages" }, \
8 { 0x44, "DIAG (0x44) time slice end" }, \
9 { 0x9c, "DIAG (0x9c) time slice end directed" }, \
10 { 0x204, "DIAG (0x204) logical-cpu utilization" }, \
11 { 0x258, "DIAG (0x258) page-reference services" }, \
12 { 0x308, "DIAG (0x308) ipl functions" }, \
13 { 0x500, "DIAG (0x500) KVM virtio functions" }, \
14 { 0x501, "DIAG (0x501) KVM breakpoint" }
15
16#define sigp_order_codes \
17 { SIGP_SENSE, "SIGP sense" }, \
18 { SIGP_EXTERNAL_CALL, "SIGP external call" }, \
19 { SIGP_EMERGENCY_SIGNAL, "SIGP emergency signal" }, \
20 { SIGP_STOP, "SIGP stop" }, \
21 { SIGP_STOP_AND_STORE_STATUS, "SIGP stop and store status" }, \
22 { SIGP_SET_ARCHITECTURE, "SIGP set architecture" }, \
23 { SIGP_SET_PREFIX, "SIGP set prefix" }, \
24 { SIGP_SENSE_RUNNING, "SIGP sense running" }, \
25 { SIGP_RESTART, "SIGP restart" }, \
26 { SIGP_INITIAL_CPU_RESET, "SIGP initial cpu reset" }, \
27 { SIGP_STORE_STATUS_AT_ADDRESS, "SIGP store status at address" }
28
29#define icpt_prog_codes \
30 { 0x0001, "Prog Operation" }, \
31 { 0x0002, "Prog Privileged Operation" }, \
32 { 0x0003, "Prog Execute" }, \
33 { 0x0004, "Prog Protection" }, \
34 { 0x0005, "Prog Addressing" }, \
35 { 0x0006, "Prog Specification" }, \
36 { 0x0007, "Prog Data" }, \
37 { 0x0008, "Prog Fixedpoint overflow" }, \
38 { 0x0009, "Prog Fixedpoint divide" }, \
39 { 0x000A, "Prog Decimal overflow" }, \
40 { 0x000B, "Prog Decimal divide" }, \
41 { 0x000C, "Prog HFP exponent overflow" }, \
42 { 0x000D, "Prog HFP exponent underflow" }, \
43 { 0x000E, "Prog HFP significance" }, \
44 { 0x000F, "Prog HFP divide" }, \
45 { 0x0010, "Prog Segment translation" }, \
46 { 0x0011, "Prog Page translation" }, \
47 { 0x0012, "Prog Translation specification" }, \
48 { 0x0013, "Prog Special operation" }, \
49 { 0x0015, "Prog Operand" }, \
50 { 0x0016, "Prog Trace table" }, \
51 { 0x0017, "Prog ASNtranslation specification" }, \
52 { 0x001C, "Prog Spaceswitch event" }, \
53 { 0x001D, "Prog HFP square root" }, \
54 { 0x001F, "Prog PCtranslation specification" }, \
55 { 0x0020, "Prog AFX translation" }, \
56 { 0x0021, "Prog ASX translation" }, \
57 { 0x0022, "Prog LX translation" }, \
58 { 0x0023, "Prog EX translation" }, \
59 { 0x0024, "Prog Primary authority" }, \
60 { 0x0025, "Prog Secondary authority" }, \
61 { 0x0026, "Prog LFXtranslation exception" }, \
62 { 0x0027, "Prog LSXtranslation exception" }, \
63 { 0x0028, "Prog ALET specification" }, \
64 { 0x0029, "Prog ALEN translation" }, \
65 { 0x002A, "Prog ALE sequence" }, \
66 { 0x002B, "Prog ASTE validity" }, \
67 { 0x002C, "Prog ASTE sequence" }, \
68 { 0x002D, "Prog Extended authority" }, \
69 { 0x002E, "Prog LSTE sequence" }, \
70 { 0x002F, "Prog ASTE instance" }, \
71 { 0x0030, "Prog Stack full" }, \
72 { 0x0031, "Prog Stack empty" }, \
73 { 0x0032, "Prog Stack specification" }, \
74 { 0x0033, "Prog Stack type" }, \
75 { 0x0034, "Prog Stack operation" }, \
76 { 0x0039, "Prog Region first translation" }, \
77 { 0x003A, "Prog Region second translation" }, \
78 { 0x003B, "Prog Region third translation" }, \
79 { 0x0040, "Prog Monitor event" }, \
80 { 0x0080, "Prog PER event" }, \
81 { 0x0119, "Prog Crypto operation" }
82
83#define exit_code_ipa0(ipa0, opcode, mnemonic) \
84 { (ipa0 << 8 | opcode), #ipa0 " " mnemonic }
85#define exit_code(opcode, mnemonic) \
86 { opcode, mnemonic }
87
88#define icpt_insn_codes \
89 exit_code_ipa0(0x01, 0x01, "PR"), \
90 exit_code_ipa0(0x01, 0x04, "PTFF"), \
91 exit_code_ipa0(0x01, 0x07, "SCKPF"), \
92 exit_code_ipa0(0xAA, 0x00, "RINEXT"), \
93 exit_code_ipa0(0xAA, 0x01, "RION"), \
94 exit_code_ipa0(0xAA, 0x02, "TRIC"), \
95 exit_code_ipa0(0xAA, 0x03, "RIOFF"), \
96 exit_code_ipa0(0xAA, 0x04, "RIEMIT"), \
97 exit_code_ipa0(0xB2, 0x02, "STIDP"), \
98 exit_code_ipa0(0xB2, 0x04, "SCK"), \
99 exit_code_ipa0(0xB2, 0x05, "STCK"), \
100 exit_code_ipa0(0xB2, 0x06, "SCKC"), \
101 exit_code_ipa0(0xB2, 0x07, "STCKC"), \
102 exit_code_ipa0(0xB2, 0x08, "SPT"), \
103 exit_code_ipa0(0xB2, 0x09, "STPT"), \
104 exit_code_ipa0(0xB2, 0x0d, "PTLB"), \
105 exit_code_ipa0(0xB2, 0x10, "SPX"), \
106 exit_code_ipa0(0xB2, 0x11, "STPX"), \
107 exit_code_ipa0(0xB2, 0x12, "STAP"), \
108 exit_code_ipa0(0xB2, 0x14, "SIE"), \
109 exit_code_ipa0(0xB2, 0x16, "SETR"), \
110 exit_code_ipa0(0xB2, 0x17, "STETR"), \
111 exit_code_ipa0(0xB2, 0x18, "PC"), \
112 exit_code_ipa0(0xB2, 0x20, "SERVC"), \
113 exit_code_ipa0(0xB2, 0x28, "PT"), \
114 exit_code_ipa0(0xB2, 0x29, "ISKE"), \
115 exit_code_ipa0(0xB2, 0x2a, "RRBE"), \
116 exit_code_ipa0(0xB2, 0x2b, "SSKE"), \
117 exit_code_ipa0(0xB2, 0x2c, "TB"), \
118 exit_code_ipa0(0xB2, 0x2e, "PGIN"), \
119 exit_code_ipa0(0xB2, 0x2f, "PGOUT"), \
120 exit_code_ipa0(0xB2, 0x30, "CSCH"), \
121 exit_code_ipa0(0xB2, 0x31, "HSCH"), \
122 exit_code_ipa0(0xB2, 0x32, "MSCH"), \
123 exit_code_ipa0(0xB2, 0x33, "SSCH"), \
124 exit_code_ipa0(0xB2, 0x34, "STSCH"), \
125 exit_code_ipa0(0xB2, 0x35, "TSCH"), \
126 exit_code_ipa0(0xB2, 0x36, "TPI"), \
127 exit_code_ipa0(0xB2, 0x37, "SAL"), \
128 exit_code_ipa0(0xB2, 0x38, "RSCH"), \
129 exit_code_ipa0(0xB2, 0x39, "STCRW"), \
130 exit_code_ipa0(0xB2, 0x3a, "STCPS"), \
131 exit_code_ipa0(0xB2, 0x3b, "RCHP"), \
132 exit_code_ipa0(0xB2, 0x3c, "SCHM"), \
133 exit_code_ipa0(0xB2, 0x40, "BAKR"), \
134 exit_code_ipa0(0xB2, 0x48, "PALB"), \
135 exit_code_ipa0(0xB2, 0x4c, "TAR"), \
136 exit_code_ipa0(0xB2, 0x50, "CSP"), \
137 exit_code_ipa0(0xB2, 0x54, "MVPG"), \
138 exit_code_ipa0(0xB2, 0x58, "BSG"), \
139 exit_code_ipa0(0xB2, 0x5a, "BSA"), \
140 exit_code_ipa0(0xB2, 0x5f, "CHSC"), \
141 exit_code_ipa0(0xB2, 0x74, "SIGA"), \
142 exit_code_ipa0(0xB2, 0x76, "XSCH"), \
143 exit_code_ipa0(0xB2, 0x78, "STCKE"), \
144 exit_code_ipa0(0xB2, 0x7c, "STCKF"), \
145 exit_code_ipa0(0xB2, 0x7d, "STSI"), \
146 exit_code_ipa0(0xB2, 0xb0, "STFLE"), \
147 exit_code_ipa0(0xB2, 0xb1, "STFL"), \
148 exit_code_ipa0(0xB2, 0xb2, "LPSWE"), \
149 exit_code_ipa0(0xB2, 0xf8, "TEND"), \
150 exit_code_ipa0(0xB2, 0xfc, "TABORT"), \
151 exit_code_ipa0(0xB9, 0x1e, "KMAC"), \
152 exit_code_ipa0(0xB9, 0x28, "PCKMO"), \
153 exit_code_ipa0(0xB9, 0x2a, "KMF"), \
154 exit_code_ipa0(0xB9, 0x2b, "KMO"), \
155 exit_code_ipa0(0xB9, 0x2d, "KMCTR"), \
156 exit_code_ipa0(0xB9, 0x2e, "KM"), \
157 exit_code_ipa0(0xB9, 0x2f, "KMC"), \
158 exit_code_ipa0(0xB9, 0x3e, "KIMD"), \
159 exit_code_ipa0(0xB9, 0x3f, "KLMD"), \
160 exit_code_ipa0(0xB9, 0x8a, "CSPG"), \
161 exit_code_ipa0(0xB9, 0x8d, "EPSW"), \
162 exit_code_ipa0(0xB9, 0x8e, "IDTE"), \
163 exit_code_ipa0(0xB9, 0x8f, "CRDTE"), \
164 exit_code_ipa0(0xB9, 0x9c, "EQBS"), \
165 exit_code_ipa0(0xB9, 0xa2, "PTF"), \
166 exit_code_ipa0(0xB9, 0xab, "ESSA"), \
167 exit_code_ipa0(0xB9, 0xae, "RRBM"), \
168 exit_code_ipa0(0xB9, 0xaf, "PFMF"), \
169 exit_code_ipa0(0xE3, 0x03, "LRAG"), \
170 exit_code_ipa0(0xE3, 0x13, "LRAY"), \
171 exit_code_ipa0(0xE3, 0x25, "NTSTG"), \
172 exit_code_ipa0(0xE5, 0x00, "LASP"), \
173 exit_code_ipa0(0xE5, 0x01, "TPROT"), \
174 exit_code_ipa0(0xE5, 0x60, "TBEGIN"), \
175 exit_code_ipa0(0xE5, 0x61, "TBEGINC"), \
176 exit_code_ipa0(0xEB, 0x25, "STCTG"), \
177 exit_code_ipa0(0xEB, 0x2f, "LCTLG"), \
178 exit_code_ipa0(0xEB, 0x60, "LRIC"), \
179 exit_code_ipa0(0xEB, 0x61, "STRIC"), \
180 exit_code_ipa0(0xEB, 0x62, "MRIC"), \
181 exit_code_ipa0(0xEB, 0x8a, "SQBS"), \
182 exit_code_ipa0(0xC8, 0x01, "ECTG"), \
183 exit_code(0x0a, "SVC"), \
184 exit_code(0x80, "SSM"), \
185 exit_code(0x82, "LPSW"), \
186 exit_code(0x83, "DIAG"), \
187 exit_code(0xae, "SIGP"), \
188 exit_code(0xac, "STNSM"), \
189 exit_code(0xad, "STOSM"), \
190 exit_code(0xb1, "LRA"), \
191 exit_code(0xb6, "STCTL"), \
192 exit_code(0xb7, "LCTL"), \
193 exit_code(0xee, "PLO")
194
195#define sie_intercept_code \
196 { 0x00, "Host interruption" }, \
197 { 0x04, "Instruction" }, \
198 { 0x08, "Program interruption" }, \
199 { 0x0c, "Instruction and program interruption" }, \
200 { 0x10, "External request" }, \
201 { 0x14, "External interruption" }, \
202 { 0x18, "I/O request" }, \
203 { 0x1c, "Wait state" }, \
204 { 0x20, "Validity" }, \
205 { 0x28, "Stop request" }, \
206 { 0x2c, "Operation exception" }, \
207 { 0x38, "Partial-execution" }, \
208 { 0x3c, "I/O interruption" }, \
209 { 0x40, "I/O instruction" }, \
210 { 0x48, "Timing subset" }
211
212/*
213 * This is the simple interceptable instructions decoder.
214 *
215 * It will be used as userspace interface and it can be used in places
216 * that does not allow to use general decoder functions,
217 * such as trace events declarations.
218 *
219 * Some userspace tools may want to parse this code
220 * and would be confused by switch(), if() and other statements,
221 * but they can understand conditional operator.
222 */
223#define INSN_DECODE_IPA0(ipa0, insn, rshift, mask) \
224 (insn >> 56) == (ipa0) ? \
225 ((ipa0 << 8) | ((insn >> rshift) & mask)) :
226
227#define INSN_DECODE(insn) (insn >> 56)
228
229/*
230 * The macro icpt_insn_decoder() takes an intercepted instruction
231 * and returns a key, which can be used to find a mnemonic name
232 * of the instruction in the icpt_insn_codes table.
233 */
234#define icpt_insn_decoder(insn) \
235 INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
236 INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
237 INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
238 INSN_DECODE_IPA0(0xb9, insn, 48, 0xff) \
239 INSN_DECODE_IPA0(0xe3, insn, 48, 0xff) \
240 INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
241 INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
242 INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
243 INSN_DECODE(insn)
244
245#endif /* _UAPI_ASM_S390_SIE_H */
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 004d385d9519..0161675878a2 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -23,7 +23,7 @@
23static int diag_release_pages(struct kvm_vcpu *vcpu) 23static int diag_release_pages(struct kvm_vcpu *vcpu)
24{ 24{
25 unsigned long start, end; 25 unsigned long start, end;
26 unsigned long prefix = vcpu->arch.sie_block->prefix; 26 unsigned long prefix = kvm_s390_get_prefix(vcpu);
27 27
28 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; 28 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
29 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; 29 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 68db43e4254f..a07ee08ac478 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -30,7 +30,7 @@
30static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, 30static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
31 unsigned long gra) 31 unsigned long gra)
32{ 32{
33 unsigned long prefix = vcpu->arch.sie_block->prefix; 33 unsigned long prefix = kvm_s390_get_prefix(vcpu);
34 34
35 if (gra < 2 * PAGE_SIZE) 35 if (gra < 2 * PAGE_SIZE)
36 gra += prefix; 36 gra += prefix;
@@ -99,7 +99,7 @@ static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
99 unsigned long __gpa; \ 99 unsigned long __gpa; \
100 \ 100 \
101 __gpa = (unsigned long)(gra); \ 101 __gpa = (unsigned long)(gra); \
102 __gpa += __vcpu->arch.sie_block->prefix; \ 102 __gpa += kvm_s390_get_prefix(__vcpu); \
103 kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x)); \ 103 kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x)); \
104}) 104})
105 105
@@ -124,7 +124,7 @@ static inline __must_check
124int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, 124int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
125 unsigned long len) 125 unsigned long len)
126{ 126{
127 unsigned long gpa = gra + vcpu->arch.sie_block->prefix; 127 unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
128 128
129 return kvm_write_guest(vcpu->kvm, gpa, data, len); 129 return kvm_write_guest(vcpu->kvm, gpa, data, len);
130} 130}
@@ -150,7 +150,7 @@ static inline __must_check
150int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, 150int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
151 unsigned long len) 151 unsigned long len)
152{ 152{
153 unsigned long gpa = gra + vcpu->arch.sie_block->prefix; 153 unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
154 154
155 return kvm_read_guest(vcpu->kvm, gpa, data, len); 155 return kvm_read_guest(vcpu->kvm, gpa, data, len);
156} 156}
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index bd607cf01a5d..a0b586c1913c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -195,6 +195,7 @@ static int handle_itdb(struct kvm_vcpu *vcpu)
195static int handle_prog(struct kvm_vcpu *vcpu) 195static int handle_prog(struct kvm_vcpu *vcpu)
196{ 196{
197 struct kvm_s390_pgm_info pgm_info; 197 struct kvm_s390_pgm_info pgm_info;
198 psw_t psw;
198 int rc; 199 int rc;
199 200
200 vcpu->stat.exit_program_interruption++; 201 vcpu->stat.exit_program_interruption++;
@@ -207,7 +208,14 @@ static int handle_prog(struct kvm_vcpu *vcpu)
207 } 208 }
208 209
209 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); 210 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
210 211 if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
212 rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
213 if (rc)
214 return rc;
215 /* Avoid endless loops of specification exceptions */
216 if (!is_valid_psw(&psw))
217 return -EOPNOTSUPP;
218 }
211 rc = handle_itdb(vcpu); 219 rc = handle_itdb(vcpu);
212 if (rc) 220 if (rc)
213 return rc; 221 return rc;
@@ -264,6 +272,8 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
264 irq.type = KVM_S390_INT_CPU_TIMER; 272 irq.type = KVM_S390_INT_CPU_TIMER;
265 break; 273 break;
266 case EXT_IRQ_EXTERNAL_CALL: 274 case EXT_IRQ_EXTERNAL_CALL:
275 if (kvm_s390_si_ext_call_pending(vcpu))
276 return 0;
267 irq.type = KVM_S390_INT_EXTERNAL_CALL; 277 irq.type = KVM_S390_INT_EXTERNAL_CALL;
268 irq.parm = vcpu->arch.sie_block->extcpuaddr; 278 irq.parm = vcpu->arch.sie_block->extcpuaddr;
269 break; 279 break;
@@ -284,33 +294,26 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
284 */ 294 */
285static int handle_mvpg_pei(struct kvm_vcpu *vcpu) 295static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
286{ 296{
287 unsigned long hostaddr, srcaddr, dstaddr;
288 psw_t *psw = &vcpu->arch.sie_block->gpsw; 297 psw_t *psw = &vcpu->arch.sie_block->gpsw;
289 struct mm_struct *mm = current->mm; 298 unsigned long srcaddr, dstaddr;
290 int reg1, reg2, rc; 299 int reg1, reg2, rc;
291 300
292 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); 301 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
293 srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
294 dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
295 302
296 /* Make sure that the source is paged-in */ 303 /* Make sure that the source is paged-in */
297 hostaddr = gmap_fault(srcaddr, vcpu->arch.gmap); 304 srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
298 if (IS_ERR_VALUE(hostaddr)) 305 if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
299 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 306 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
300 down_read(&mm->mmap_sem); 307 rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
301 rc = get_user_pages(current, mm, hostaddr, 1, 0, 0, NULL, NULL); 308 if (rc != 0)
302 up_read(&mm->mmap_sem);
303 if (rc < 0)
304 return rc; 309 return rc;
305 310
306 /* Make sure that the destination is paged-in */ 311 /* Make sure that the destination is paged-in */
307 hostaddr = gmap_fault(dstaddr, vcpu->arch.gmap); 312 dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
308 if (IS_ERR_VALUE(hostaddr)) 313 if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
309 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 314 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
310 down_read(&mm->mmap_sem); 315 rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
311 rc = get_user_pages(current, mm, hostaddr, 1, 1, 0, NULL, NULL); 316 if (rc != 0)
312 up_read(&mm->mmap_sem);
313 if (rc < 0)
314 return rc; 317 return rc;
315 318
316 psw->addr = __rewind_psw(*psw, 4); 319 psw->addr = __rewind_psw(*psw, 4);
@@ -322,6 +325,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
322{ 325{
323 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */ 326 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
324 return handle_mvpg_pei(vcpu); 327 return handle_mvpg_pei(vcpu);
328 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
329 return kvm_s390_handle_sigp_pei(vcpu);
325 330
326 return -EOPNOTSUPP; 331 return -EOPNOTSUPP;
327} 332}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 75cd3217cd5a..bf0d9bc15bcd 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -148,9 +148,8 @@ static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
148 148
149static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 149static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
150{ 150{
151 atomic_clear_mask(CPUSTAT_ECALL_PEND | 151 atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
152 CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 152 &vcpu->arch.sie_block->cpuflags);
153 &vcpu->arch.sie_block->cpuflags);
154 vcpu->arch.sie_block->lctl = 0x0000; 153 vcpu->arch.sie_block->lctl = 0x0000;
155 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); 154 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
156 155
@@ -524,6 +523,20 @@ static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
524 } 523 }
525} 524}
526 525
526/* Check whether SIGP interpretation facility has an external call pending */
527int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
528{
529 atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
530
531 if (!psw_extint_disabled(vcpu) &&
532 (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
533 (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
534 (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
535 return 1;
536
537 return 0;
538}
539
527int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) 540int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
528{ 541{
529 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 542 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -554,6 +567,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
554 if (!rc && kvm_cpu_has_pending_timer(vcpu)) 567 if (!rc && kvm_cpu_has_pending_timer(vcpu))
555 rc = 1; 568 rc = 1;
556 569
570 if (!rc && kvm_s390_si_ext_call_pending(vcpu))
571 rc = 1;
572
557 return rc; 573 return rc;
558} 574}
559 575
@@ -610,7 +626,8 @@ no_timer:
610 while (list_empty(&vcpu->arch.local_int.list) && 626 while (list_empty(&vcpu->arch.local_int.list) &&
611 list_empty(&vcpu->arch.local_int.float_int->list) && 627 list_empty(&vcpu->arch.local_int.float_int->list) &&
612 (!vcpu->arch.local_int.timer_due) && 628 (!vcpu->arch.local_int.timer_due) &&
613 !signal_pending(current)) { 629 !signal_pending(current) &&
630 !kvm_s390_si_ext_call_pending(vcpu)) {
614 set_current_state(TASK_INTERRUPTIBLE); 631 set_current_state(TASK_INTERRUPTIBLE);
615 spin_unlock_bh(&vcpu->arch.local_int.lock); 632 spin_unlock_bh(&vcpu->arch.local_int.lock);
616 spin_unlock(&vcpu->arch.local_int.float_int->lock); 633 spin_unlock(&vcpu->arch.local_int.float_int->lock);
@@ -667,6 +684,11 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
667 } 684 }
668 atomic_set(&li->active, 0); 685 atomic_set(&li->active, 0);
669 spin_unlock_bh(&li->lock); 686 spin_unlock_bh(&li->lock);
687
688 /* clear pending external calls set by sigp interpretation facility */
689 atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
690 atomic_clear_mask(SIGP_CTRL_C,
691 &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
670} 692}
671 693
672void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 694void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0a01744cbdd9..e519860c6031 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -633,7 +633,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
633 vcpu->arch.sie_block->ecb |= 0x10; 633 vcpu->arch.sie_block->ecb |= 0x10;
634 634
635 vcpu->arch.sie_block->ecb2 = 8; 635 vcpu->arch.sie_block->ecb2 = 8;
636 vcpu->arch.sie_block->eca = 0xC1002000U; 636 vcpu->arch.sie_block->eca = 0xD1002000U;
637 if (sclp_has_siif()) 637 if (sclp_has_siif())
638 vcpu->arch.sie_block->eca |= 1; 638 vcpu->arch.sie_block->eca |= 1;
639 vcpu->arch.sie_block->fac = (int) (long) vfacilities; 639 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
@@ -753,7 +753,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
753 753
754 kvm_for_each_vcpu(i, vcpu, kvm) { 754 kvm_for_each_vcpu(i, vcpu, kvm) {
755 /* match against both prefix pages */ 755 /* match against both prefix pages */
756 if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) { 756 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
757 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); 757 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
758 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 758 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
759 exit_sie_sync(vcpu); 759 exit_sie_sync(vcpu);
@@ -1017,7 +1017,7 @@ retry:
1017 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { 1017 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1018 int rc; 1018 int rc;
1019 rc = gmap_ipte_notify(vcpu->arch.gmap, 1019 rc = gmap_ipte_notify(vcpu->arch.gmap,
1020 vcpu->arch.sie_block->prefix, 1020 kvm_s390_get_prefix(vcpu),
1021 PAGE_SIZE * 2); 1021 PAGE_SIZE * 2);
1022 if (rc) 1022 if (rc)
1023 return rc; 1023 return rc;
@@ -1045,15 +1045,30 @@ retry:
1045 return 0; 1045 return 0;
1046} 1046}
1047 1047
1048static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu) 1048/**
1049 * kvm_arch_fault_in_page - fault-in guest page if necessary
1050 * @vcpu: The corresponding virtual cpu
1051 * @gpa: Guest physical address
1052 * @writable: Whether the page should be writable or not
1053 *
1054 * Make sure that a guest page has been faulted-in on the host.
1055 *
1056 * Return: Zero on success, negative error code otherwise.
1057 */
1058long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
1049{ 1059{
1050 long rc;
1051 hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
1052 struct mm_struct *mm = current->mm; 1060 struct mm_struct *mm = current->mm;
1061 hva_t hva;
1062 long rc;
1063
1064 hva = gmap_fault(gpa, vcpu->arch.gmap);
1065 if (IS_ERR_VALUE(hva))
1066 return (long)hva;
1053 down_read(&mm->mmap_sem); 1067 down_read(&mm->mmap_sem);
1054 rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL); 1068 rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
1055 up_read(&mm->mmap_sem); 1069 up_read(&mm->mmap_sem);
1056 return rc; 1070
1071 return rc < 0 ? rc : 0;
1057} 1072}
1058 1073
1059static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, 1074static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
@@ -1191,9 +1206,12 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1191 } else if (current->thread.gmap_pfault) { 1206 } else if (current->thread.gmap_pfault) {
1192 trace_kvm_s390_major_guest_pfault(vcpu); 1207 trace_kvm_s390_major_guest_pfault(vcpu);
1193 current->thread.gmap_pfault = 0; 1208 current->thread.gmap_pfault = 0;
1194 if (kvm_arch_setup_async_pf(vcpu) || 1209 if (kvm_arch_setup_async_pf(vcpu)) {
1195 (kvm_arch_fault_in_sync(vcpu) >= 0))
1196 rc = 0; 1210 rc = 0;
1211 } else {
1212 gpa_t gpa = current->thread.gmap_addr;
1213 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1214 }
1197 } 1215 }
1198 1216
1199 if (rc == -1) { 1217 if (rc == -1) {
@@ -1320,7 +1338,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1320 1338
1321 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1339 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1322 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 1340 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1323 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix; 1341 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1324 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1342 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1325 1343
1326 if (vcpu->sigset_active) 1344 if (vcpu->sigset_active)
@@ -1339,6 +1357,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1339int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1357int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1340{ 1358{
1341 unsigned char archmode = 1; 1359 unsigned char archmode = 1;
1360 unsigned int px;
1342 u64 clkcomp; 1361 u64 clkcomp;
1343 int rc; 1362 int rc;
1344 1363
@@ -1357,8 +1376,9 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1357 vcpu->run->s.regs.gprs, 128); 1376 vcpu->run->s.regs.gprs, 128);
1358 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 1377 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1359 &vcpu->arch.sie_block->gpsw, 16); 1378 &vcpu->arch.sie_block->gpsw, 16);
1379 px = kvm_s390_get_prefix(vcpu);
1360 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 1380 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1361 &vcpu->arch.sie_block->prefix, 4); 1381 &px, 4);
1362 rc |= write_guest_abs(vcpu, 1382 rc |= write_guest_abs(vcpu,
1363 gpa + offsetof(struct save_area, fp_ctrl_reg), 1383 gpa + offsetof(struct save_area, fp_ctrl_reg),
1364 &vcpu->arch.guest_fpregs.fpc, 4); 1384 &vcpu->arch.guest_fpregs.fpc, 4);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c28423a3acc0..a8655ed31616 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -61,9 +61,15 @@ static inline int kvm_is_ucontrol(struct kvm *kvm)
61#endif 61#endif
62} 62}
63 63
64#define GUEST_PREFIX_SHIFT 13
65static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
66{
67 return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
68}
69
64static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) 70static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
65{ 71{
66 vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u; 72 vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
67 vcpu->arch.sie_block->ihcpu = 0xffff; 73 vcpu->arch.sie_block->ihcpu = 0xffff;
68 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 74 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
69} 75}
@@ -142,6 +148,7 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
142int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); 148int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
143 149
144/* implemented in priv.c */ 150/* implemented in priv.c */
151int is_valid_psw(psw_t *psw);
145int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); 152int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
146int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); 153int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
147int kvm_s390_handle_01(struct kvm_vcpu *vcpu); 154int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
@@ -153,8 +160,10 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
153 160
154/* implemented in sigp.c */ 161/* implemented in sigp.c */
155int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); 162int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
163int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
156 164
157/* implemented in kvm-s390.c */ 165/* implemented in kvm-s390.c */
166long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
158int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); 167int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
159int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); 168int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
160void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); 169void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
@@ -212,6 +221,7 @@ static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
212int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); 221int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
213int psw_extint_disabled(struct kvm_vcpu *vcpu); 222int psw_extint_disabled(struct kvm_vcpu *vcpu);
214void kvm_s390_destroy_adapters(struct kvm *kvm); 223void kvm_s390_destroy_adapters(struct kvm *kvm);
224int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
215 225
216/* implemented in guestdbg.c */ 226/* implemented in guestdbg.c */
217void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); 227void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 07d0c1025cb9..6296159ac883 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -119,8 +119,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
119 if (operand2 & 3) 119 if (operand2 & 3)
120 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 120 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
121 121
122 address = vcpu->arch.sie_block->prefix; 122 address = kvm_s390_get_prefix(vcpu);
123 address = address & 0x7fffe000u;
124 123
125 /* get the value */ 124 /* get the value */
126 rc = write_guest(vcpu, operand2, &address, sizeof(address)); 125 rc = write_guest(vcpu, operand2, &address, sizeof(address));
@@ -365,7 +364,8 @@ static void handle_new_psw(struct kvm_vcpu *vcpu)
365#define PSW_ADDR_24 0x0000000000ffffffUL 364#define PSW_ADDR_24 0x0000000000ffffffUL
366#define PSW_ADDR_31 0x000000007fffffffUL 365#define PSW_ADDR_31 0x000000007fffffffUL
367 366
368static int is_valid_psw(psw_t *psw) { 367int is_valid_psw(psw_t *psw)
368{
369 if (psw->mask & PSW_MASK_UNASSIGNED) 369 if (psw->mask & PSW_MASK_UNASSIGNED)
370 return 0; 370 return 0;
371 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) { 371 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
@@ -376,6 +376,8 @@ static int is_valid_psw(psw_t *psw) {
376 return 0; 376 return 0;
377 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA) 377 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
378 return 0; 378 return 0;
379 if (psw->addr & 1)
380 return 0;
379 return 1; 381 return 1;
380} 382}
381 383
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index c0b99e0f6b63..d0341d2e54b1 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -458,3 +458,38 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
458 kvm_s390_set_psw_cc(vcpu, rc); 458 kvm_s390_set_psw_cc(vcpu, rc);
459 return 0; 459 return 0;
460} 460}
461
462/*
463 * Handle SIGP partial execution interception.
464 *
465 * This interception will occur at the source cpu when a source cpu sends an
466 * external call to a target cpu and the target cpu has the WAIT bit set in
467 * its cpuflags. Interception will occurr after the interrupt indicator bits at
468 * the target cpu have been set. All error cases will lead to instruction
469 * interception, therefore nothing is to be checked or prepared.
470 */
471int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
472{
473 int r3 = vcpu->arch.sie_block->ipa & 0x000f;
474 u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
475 struct kvm_vcpu *dest_vcpu;
476 u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
477
478 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
479
480 if (order_code == SIGP_EXTERNAL_CALL) {
481 dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
482 BUG_ON(dest_vcpu == NULL);
483
484 spin_lock_bh(&dest_vcpu->arch.local_int.lock);
485 if (waitqueue_active(&dest_vcpu->wq))
486 wake_up_interruptible(&dest_vcpu->wq);
487 dest_vcpu->preempted = true;
488 spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
489
490 kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
491 return 0;
492 }
493
494 return -EOPNOTSUPP;
495}
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index abf6ba52769e..916834d7a73a 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -2,7 +2,7 @@
2#define _TRACE_KVM_H 2#define _TRACE_KVM_H
3 3
4#include <linux/tracepoint.h> 4#include <linux/tracepoint.h>
5#include <asm/sigp.h> 5#include <asm/sie.h>
6#include <asm/debug.h> 6#include <asm/debug.h>
7#include <asm/dis.h> 7#include <asm/dis.h>
8 8
@@ -125,17 +125,6 @@ TRACE_EVENT(kvm_s390_sie_fault,
125 VCPU_TP_PRINTK("%s", "fault in sie instruction") 125 VCPU_TP_PRINTK("%s", "fault in sie instruction")
126 ); 126 );
127 127
128#define sie_intercept_code \
129 {0x04, "Instruction"}, \
130 {0x08, "Program interruption"}, \
131 {0x0C, "Instruction and program interruption"}, \
132 {0x10, "External request"}, \
133 {0x14, "External interruption"}, \
134 {0x18, "I/O request"}, \
135 {0x1C, "Wait state"}, \
136 {0x20, "Validity"}, \
137 {0x28, "Stop request"}
138
139TRACE_EVENT(kvm_s390_sie_exit, 128TRACE_EVENT(kvm_s390_sie_exit,
140 TP_PROTO(VCPU_PROTO_COMMON, u8 icptcode), 129 TP_PROTO(VCPU_PROTO_COMMON, u8 icptcode),
141 TP_ARGS(VCPU_ARGS_COMMON, icptcode), 130 TP_ARGS(VCPU_ARGS_COMMON, icptcode),
@@ -165,7 +154,6 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
165 TP_STRUCT__entry( 154 TP_STRUCT__entry(
166 VCPU_FIELD_COMMON 155 VCPU_FIELD_COMMON
167 __field(__u64, instruction) 156 __field(__u64, instruction)
168 __field(char, insn[8])
169 ), 157 ),
170 158
171 TP_fast_assign( 159 TP_fast_assign(
@@ -176,10 +164,8 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
176 164
177 VCPU_TP_PRINTK("intercepted instruction %016llx (%s)", 165 VCPU_TP_PRINTK("intercepted instruction %016llx (%s)",
178 __entry->instruction, 166 __entry->instruction,
179 insn_to_mnemonic((unsigned char *) 167 __print_symbolic(icpt_insn_decoder(__entry->instruction),
180 &__entry->instruction, 168 icpt_insn_codes))
181 __entry->insn, sizeof(__entry->insn)) ?
182 "unknown" : __entry->insn)
183 ); 169 );
184 170
185/* 171/*
@@ -227,18 +213,6 @@ TRACE_EVENT(kvm_s390_intercept_validity,
227 * Trace points for instructions that are of special interest. 213 * Trace points for instructions that are of special interest.
228 */ 214 */
229 215
230#define sigp_order_codes \
231 {SIGP_SENSE, "sense"}, \
232 {SIGP_EXTERNAL_CALL, "external call"}, \
233 {SIGP_EMERGENCY_SIGNAL, "emergency signal"}, \
234 {SIGP_STOP, "stop"}, \
235 {SIGP_STOP_AND_STORE_STATUS, "stop and store status"}, \
236 {SIGP_SET_ARCHITECTURE, "set architecture"}, \
237 {SIGP_SET_PREFIX, "set prefix"}, \
238 {SIGP_STORE_STATUS_AT_ADDRESS, "store status at addr"}, \
239 {SIGP_SENSE_RUNNING, "sense running"}, \
240 {SIGP_RESTART, "restart"}
241
242TRACE_EVENT(kvm_s390_handle_sigp, 216TRACE_EVENT(kvm_s390_handle_sigp,
243 TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr, \ 217 TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr, \
244 __u32 parameter), 218 __u32 parameter),
@@ -265,12 +239,28 @@ TRACE_EVENT(kvm_s390_handle_sigp,
265 __entry->cpu_addr, __entry->parameter) 239 __entry->cpu_addr, __entry->parameter)
266 ); 240 );
267 241
268#define diagnose_codes \ 242TRACE_EVENT(kvm_s390_handle_sigp_pei,
269 {0x10, "release pages"}, \ 243 TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr),
270 {0x44, "time slice end"}, \ 244 TP_ARGS(VCPU_ARGS_COMMON, order_code, cpu_addr),
271 {0x308, "ipl functions"}, \ 245
272 {0x500, "kvm hypercall"}, \ 246 TP_STRUCT__entry(
273 {0x501, "kvm breakpoint"} 247 VCPU_FIELD_COMMON
248 __field(__u8, order_code)
249 __field(__u16, cpu_addr)
250 ),
251
252 TP_fast_assign(
253 VCPU_ASSIGN_COMMON
254 __entry->order_code = order_code;
255 __entry->cpu_addr = cpu_addr;
256 ),
257
258 VCPU_TP_PRINTK("handle sigp pei order %02x (%s), cpu address %04x",
259 __entry->order_code,
260 __print_symbolic(__entry->order_code,
261 sigp_order_codes),
262 __entry->cpu_addr)
263 );
274 264
275TRACE_EVENT(kvm_s390_handle_diag, 265TRACE_EVENT(kvm_s390_handle_diag,
276 TP_PROTO(VCPU_PROTO_COMMON, __u16 code), 266 TP_PROTO(VCPU_PROTO_COMMON, __u16 code),
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ea4a31b95990..66ba60c9b77e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -958,8 +958,10 @@ void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
958 unsigned long addr, next; 958 unsigned long addr, next;
959 pgd_t *pgd; 959 pgd_t *pgd;
960 960
961 down_write(&mm->mmap_sem);
962 if (init_skey && mm_use_skey(mm))
963 goto out_up;
961 addr = start; 964 addr = start;
962 down_read(&mm->mmap_sem);
963 pgd = pgd_offset(mm, addr); 965 pgd = pgd_offset(mm, addr);
964 do { 966 do {
965 next = pgd_addr_end(addr, end); 967 next = pgd_addr_end(addr, end);
@@ -967,7 +969,10 @@ void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
967 continue; 969 continue;
968 next = page_table_reset_pud(mm, pgd, addr, next, init_skey); 970 next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
969 } while (pgd++, addr = next, addr != end); 971 } while (pgd++, addr = next, addr != end);
970 up_read(&mm->mmap_sem); 972 if (init_skey)
973 current->mm->context.use_skey = 1;
974out_up:
975 up_write(&mm->mmap_sem);
971} 976}
972EXPORT_SYMBOL(page_table_reset_pgste); 977EXPORT_SYMBOL(page_table_reset_pgste);
973 978
@@ -1384,19 +1389,6 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
1384 */ 1389 */
1385void s390_enable_skey(void) 1390void s390_enable_skey(void)
1386{ 1391{
1387 /*
1388 * To avoid races between multiple vcpus, ending in calling
1389 * page_table_reset twice or more,
1390 * the page_table_lock is taken for serialization.
1391 */
1392 spin_lock(&current->mm->page_table_lock);
1393 if (mm_use_skey(current->mm)) {
1394 spin_unlock(&current->mm->page_table_lock);
1395 return;
1396 }
1397
1398 current->mm->context.use_skey = 1;
1399 spin_unlock(&current->mm->page_table_lock);
1400 page_table_reset_pgste(current->mm, 0, TASK_SIZE, true); 1392 page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
1401} 1393}
1402EXPORT_SYMBOL_GPL(s390_enable_skey); 1394EXPORT_SYMBOL_GPL(s390_enable_skey);
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index b57fe0efb422..1918d9dff45d 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -27,7 +27,9 @@ struct read_info_sccb {
27 u8 loadparm[8]; /* 24-31 */ 27 u8 loadparm[8]; /* 24-31 */
28 u8 _reserved1[48 - 32]; /* 32-47 */ 28 u8 _reserved1[48 - 32]; /* 32-47 */
29 u64 facilities; /* 48-55 */ 29 u64 facilities; /* 48-55 */
30 u8 _reserved2[84 - 56]; /* 56-83 */ 30 u8 _reserved2a[76 - 56]; /* 56-75 */
31 u32 ibc; /* 76-79 */
32 u8 _reserved2b[84 - 80]; /* 80-83 */
31 u8 fac84; /* 84 */ 33 u8 fac84; /* 84 */
32 u8 fac85; /* 85 */ 34 u8 fac85; /* 85 */
33 u8 _reserved3[91 - 86]; /* 86-90 */ 35 u8 _reserved3[91 - 86]; /* 86-90 */
@@ -47,6 +49,7 @@ static unsigned long sclp_hsa_size;
47static unsigned int sclp_max_cpu; 49static unsigned int sclp_max_cpu;
48static struct sclp_ipl_info sclp_ipl_info; 50static struct sclp_ipl_info sclp_ipl_info;
49static unsigned char sclp_siif; 51static unsigned char sclp_siif;
52static u32 sclp_ibc;
50 53
51u64 sclp_facilities; 54u64 sclp_facilities;
52u8 sclp_fac84; 55u8 sclp_fac84;
@@ -111,6 +114,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
111 sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; 114 sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
112 sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; 115 sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
113 sclp_rzm <<= 20; 116 sclp_rzm <<= 20;
117 sclp_ibc = sccb->ibc;
114 118
115 if (!sccb->hcpua) { 119 if (!sccb->hcpua) {
116 if (MACHINE_IS_VM) 120 if (MACHINE_IS_VM)
@@ -168,6 +172,12 @@ int sclp_has_siif(void)
168} 172}
169EXPORT_SYMBOL(sclp_has_siif); 173EXPORT_SYMBOL(sclp_has_siif);
170 174
175unsigned int sclp_get_ibc(void)
176{
177 return sclp_ibc;
178}
179EXPORT_SYMBOL(sclp_get_ibc);
180
171/* 181/*
172 * This function will be called after sclp_facilities_detect(), which gets 182 * This function will be called after sclp_facilities_detect(), which gets
173 * called from early.c code. The sclp_facilities_detect() function retrieves 183 * called from early.c code. The sclp_facilities_detect() function retrieves