author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-18 19:05:28 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-18 19:05:28 -0500
commit	66dcff86ba40eebb5133cccf450878f2bba102ef (patch)
tree	e7eb49ad9316989a529b00303d2dd2cffa61a7f5 /arch/s390/kvm
parent	91ed9e8a32d9a76adc59c83f8b40024076cf8a02 (diff)
parent	2c4aa55a6af070262cca425745e8e54310e96b8d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM update from Paolo Bonzini:
 "3.19 changes for KVM:

   - spring cleaning: removed support for IA64, and for hardware-
     assisted virtualization on the PPC970

   - ARM, PPC, s390 all had only small fixes

  For x86:

   - small performance improvements (though only on weird guests)

   - usual round of hardware-compliancy fixes from Nadav

   - APICv fixes

   - XSAVES support for hosts and guests.  XSAVES hosts were broken
     because the (non-KVM) XSAVES patches inadvertently changed the KVM
     userspace ABI whenever XSAVES was enabled; hence, this part is
     going to stable.  Guest support is just a matter of exposing the
     feature and CPUID leaves support"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (179 commits)
  KVM: move APIC types to arch/x86/
  KVM: PPC: Book3S: Enable in-kernel XICS emulation by default
  KVM: PPC: Book3S HV: Improve H_CONFER implementation
  KVM: PPC: Book3S HV: Fix endianness of instruction obtained from HEIR register
  KVM: PPC: Book3S HV: Remove code for PPC970 processors
  KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
  KVM: PPC: Book3S HV: Simplify locking around stolen time calculations
  arch: powerpc: kvm: book3s_paired_singles.c: Remove unused function
  arch: powerpc: kvm: book3s_pr.c: Remove unused function
  arch: powerpc: kvm: book3s.c: Remove some unused functions
  arch: powerpc: kvm: book3s_32_mmu.c: Remove unused function
  KVM: PPC: Book3S HV: Check wait conditions before sleeping in kvmppc_vcore_blocked
  KVM: PPC: Book3S HV: ptes are big endian
  KVM: PPC: Book3S HV: Fix inaccuracies in ICP emulation for H_IPI
  KVM: PPC: Book3S HV: Fix KSM memory corruption
  KVM: PPC: Book3S HV: Fix an issue where guest is paused on receiving HMI
  KVM: PPC: Book3S HV: Fix computation of tlbie operand
  KVM: PPC: Book3S HV: Add missing HPTE unlock
  KVM: PPC: BookE: Improve irq inject tracepoint
  arm/arm64: KVM: Require in-kernel vgic for the arch timers
  ...
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/gaccess.c		|   40
-rw-r--r--	arch/s390/kvm/intercept.c	|   20
-rw-r--r--	arch/s390/kvm/interrupt.c	| 1044
-rw-r--r--	arch/s390/kvm/kvm-s390.c	|   22
-rw-r--r--	arch/s390/kvm/kvm-s390.h	|   11
-rw-r--r--	arch/s390/kvm/priv.c		|   95
-rw-r--r--	arch/s390/kvm/sigp.c		|  305
7 files changed, 954 insertions(+), 583 deletions(-)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 0f961a1c64b3..8b9ccf02a2c5 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -207,8 +207,6 @@ union raddress {
 	unsigned long pfra : 52; /* Page-Frame Real Address */
 };
 
-static int ipte_lock_count;
-static DEFINE_MUTEX(ipte_mutex);
 
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
@@ -216,47 +214,51 @@ int ipte_lock_held(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.sie_block->eca & 1)
 		return ic->kh != 0;
-	return ipte_lock_count != 0;
+	return vcpu->kvm->arch.ipte_lock_count != 0;
 }
 
 static void ipte_lock_simple(struct kvm_vcpu *vcpu)
 {
 	union ipte_control old, new, *ic;
 
-	mutex_lock(&ipte_mutex);
-	ipte_lock_count++;
-	if (ipte_lock_count > 1)
+	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
+	vcpu->kvm->arch.ipte_lock_count++;
+	if (vcpu->kvm->arch.ipte_lock_count > 1)
 		goto out;
 	ic = &vcpu->kvm->arch.sca->ipte_control;
 	do {
-		old = ACCESS_ONCE(*ic);
+		old = *ic;
+		barrier();
 		while (old.k) {
 			cond_resched();
-			old = ACCESS_ONCE(*ic);
+			old = *ic;
+			barrier();
 		}
 		new = old;
 		new.k = 1;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
 out:
-	mutex_unlock(&ipte_mutex);
+	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
 }
 
 static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
 {
 	union ipte_control old, new, *ic;
 
-	mutex_lock(&ipte_mutex);
-	ipte_lock_count--;
-	if (ipte_lock_count)
+	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
+	vcpu->kvm->arch.ipte_lock_count--;
+	if (vcpu->kvm->arch.ipte_lock_count)
 		goto out;
 	ic = &vcpu->kvm->arch.sca->ipte_control;
 	do {
-		new = old = ACCESS_ONCE(*ic);
+		old = *ic;
+		barrier();
+		new = old;
 		new.k = 0;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
 	wake_up(&vcpu->kvm->arch.ipte_wq);
 out:
-	mutex_unlock(&ipte_mutex);
+	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
 }
 
 static void ipte_lock_siif(struct kvm_vcpu *vcpu)
@@ -265,10 +267,12 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
 
 	ic = &vcpu->kvm->arch.sca->ipte_control;
 	do {
-		old = ACCESS_ONCE(*ic);
+		old = *ic;
+		barrier();
 		while (old.kg) {
 			cond_resched();
-			old = ACCESS_ONCE(*ic);
+			old = *ic;
+			barrier();
 		}
 		new = old;
 		new.k = 1;
@@ -282,7 +286,9 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 
 	ic = &vcpu->kvm->arch.sca->ipte_control;
 	do {
-		new = old = ACCESS_ONCE(*ic);
+		old = *ic;
+		barrier();
+		new = old;
 		new.kh--;
 		if (!new.kh)
 			new.k = 0;
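A note on the pattern above: the gaccess.c changes move the IPTE lock state from file-scope globals (ipte_lock_count, ipte_mutex) into per-VM fields under vcpu->kvm->arch, so the lock no longer serializes unrelated VMs, and they replace ACCESS_ONCE() on the ipte_control union with a plain read plus barrier(), since ACCESS_ONCE() is only dependable on scalar types. What follows is a minimal user-space sketch of the same acquire loop, using C11 atomics in place of the kernel's cmpxchg(); IPTE_K_BIT and the names here are illustrative assumptions, not the real SIE control-block layout:

#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>

#define IPTE_K_BIT (1ULL << 63)	/* assumed position of the "k" lock bit */

/* Spin until the lock bit can be set atomically, mirroring the shape of
 * ipte_lock_simple()'s read / wait / cmpxchg loop. */
static void ipte_lock_sketch(_Atomic uint64_t *ctl)
{
	uint64_t old, new;

	do {
		old = atomic_load(ctl);
		while (old & IPTE_K_BIT) {	/* lock held: wait politely */
			sched_yield();		/* stands in for cond_resched() */
			old = atomic_load(ctl);
		}
		new = old | IPTE_K_BIT;
		/* retry if the word changed between the load and the CAS */
	} while (!atomic_compare_exchange_strong(ctl, &old, new));
}

The unlock paths in the diff follow the same read-modify-CAS shape, clearing the lock bit (or decrementing the kh counter) instead of setting it.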
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index eaf46291d361..81c77ab8102e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -38,6 +38,19 @@ static const intercept_handler_t instruction_handlers[256] = {
 	[0xeb] = kvm_s390_handle_eb,
 };
 
+void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
+{
+	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+
+	/* Use the length of the EXECUTE instruction if necessary */
+	if (sie_block->icptstatus & 1) {
+		ilc = (sie_block->icptstatus >> 4) & 0x6;
+		if (!ilc)
+			ilc = 4;
+	}
+	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc);
+}
+
 static int handle_noop(struct kvm_vcpu *vcpu)
 {
 	switch (vcpu->arch.sie_block->icptcode) {
@@ -244,7 +257,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 {
 	u16 eic = vcpu->arch.sie_block->eic;
-	struct kvm_s390_interrupt irq;
+	struct kvm_s390_irq irq;
 	psw_t newpsw;
 	int rc;
 
@@ -269,7 +282,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 		if (kvm_s390_si_ext_call_pending(vcpu))
 			return 0;
 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
-		irq.parm = vcpu->arch.sie_block->extcpuaddr;
+		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -288,7 +301,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
  */
 static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 {
-	psw_t *psw = &vcpu->arch.sie_block->gpsw;
 	unsigned long srcaddr, dstaddr;
 	int reg1, reg2, rc;
 
@@ -310,7 +322,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 	if (rc != 0)
 		return rc;
 
-	psw->addr = __rewind_psw(*psw, 4);
+	kvm_s390_rewind_psw(vcpu, 4);
 
 	return 0;
 }
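The new kvm_s390_rewind_psw() helper above centralizes PSW rewinding: when bit 0 of icptstatus is set, the intercepted instruction was the target of an EXECUTE, so the instruction-length code must be taken from icptstatus bits 4-6 rather than the caller's value, a zero encoding meaning a 4-byte instruction. A small stand-alone illustration of that bit arithmetic; the helper name and test values are made up, the field layout follows the diff above:

#include <assert.h>
#include <stdint.h>

/* Mirror of the ILC selection in kvm_s390_rewind_psw(). */
static int rewind_length(uint8_t icptstatus, int ilc)
{
	if (icptstatus & 1) {			/* reached via EXECUTE */
		ilc = (icptstatus >> 4) & 0x6;	/* encoded length: 0, 2, 4 or 6 */
		if (!ilc)
			ilc = 4;		/* zero encodes a 4-byte length */
	}
	return ilc;
}

int main(void)
{
	assert(rewind_length(0x00, 4) == 4);	/* no EXECUTE: keep caller's ilc */
	assert(rewind_length(0x61, 0) == 6);	/* EXECUTE, encoded length 6 */
	assert(rewind_length(0x01, 0) == 4);	/* EXECUTE, zero encoding -> 4 */
	return 0;
}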
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a39838457f01..f00f31e66cd8 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -16,6 +16,7 @@
 #include <linux/mmu_context.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
+#include <linux/bitmap.h>
 #include <asm/asm-offsets.h>
 #include <asm/uaccess.h>
 #include "kvm-s390.h"
@@ -27,8 +28,8 @@
 #define IOINT_CSSID_MASK 0x03fc0000
 #define IOINT_AI_MASK 0x04000000
 #define PFAULT_INIT 0x0600
-
-static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
+#define PFAULT_DONE 0x0680
+#define VIRTIO_PARAM 0x0d00
 
 static int is_ioint(u64 type)
 {
@@ -136,6 +137,31 @@ static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.local_int.pending_irqs;
+}
+
+static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+{
+	unsigned long active_mask = pending_local_irqs(vcpu);
+
+	if (psw_extint_disabled(vcpu))
+		active_mask &= ~IRQ_PEND_EXT_MASK;
+	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
+	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
+		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
+	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
+	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
+		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+	if (psw_mchk_disabled(vcpu))
+		active_mask &= ~IRQ_PEND_MCHK_MASK;
+
+	return active_mask;
+}
+
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
 	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
@@ -170,26 +196,45 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
+static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
+{
+	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+		return;
+	if (psw_extint_disabled(vcpu))
+		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+	else
+		vcpu->arch.sie_block->lctl |= LCTL_CR0;
+}
+
+static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
+{
+	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+		return;
+	if (psw_mchk_disabled(vcpu))
+		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+	else
+		vcpu->arch.sie_block->lctl |= LCTL_CR14;
+}
+
+/* Set interception request for non-deliverable local interrupts */
+static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+{
+	set_intercept_indicators_ext(vcpu);
+	set_intercept_indicators_mchk(vcpu);
+}
+
 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 				      struct kvm_s390_interrupt_info *inti)
 {
 	switch (inti->type) {
-	case KVM_S390_INT_EXTERNAL_CALL:
-	case KVM_S390_INT_EMERGENCY:
 	case KVM_S390_INT_SERVICE:
-	case KVM_S390_INT_PFAULT_INIT:
 	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
-	case KVM_S390_INT_CLOCK_COMP:
-	case KVM_S390_INT_CPU_TIMER:
 		if (psw_extint_disabled(vcpu))
 			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
 		else
 			vcpu->arch.sie_block->lctl |= LCTL_CR0;
 		break;
-	case KVM_S390_SIGP_STOP:
-		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
-		break;
 	case KVM_S390_MCHK:
 		if (psw_mchk_disabled(vcpu))
 			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -226,13 +271,236 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
 	}
 }
 
-static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
-					   struct kvm_s390_pgm_info *pgm_info)
+static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	int rc;
+
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
+					 0, 0);
+
+	rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
+			  (u16 *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	int rc;
+
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
+					 0, 0);
+
+	rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
+			  (u16 __user *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_ext_info ext;
+	int rc;
+
+	spin_lock(&li->lock);
+	ext = li->irq.ext;
+	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
+	li->irq.ext.ext_params2 = 0;
+	spin_unlock(&li->lock);
+
+	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
+		   0, ext.ext_params2);
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+					 KVM_S390_INT_PFAULT_INIT,
+					 0, ext.ext_params2);
+
+	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_mchk_info mchk;
+	int rc;
+
+	spin_lock(&li->lock);
+	mchk = li->irq.mchk;
+	/*
+	 * If there was an exigent machine check pending, then any repressible
+	 * machine checks that might have been pending are indicated along
+	 * with it, so always clear both bits
+	 */
+	clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+	clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+	memset(&li->irq.mchk, 0, sizeof(mchk));
+	spin_unlock(&li->lock);
+
+	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+		   mchk.mcic);
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
+					 mchk.cr14, mchk.mcic);
+
+	rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+	rc |= put_guest_lc(vcpu, mchk.mcic,
+			   (u64 __user *) __LC_MCCK_CODE);
+	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+			     &mchk.fixed_logout, sizeof(mchk.fixed_logout));
+	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	int rc;
+
+	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+	vcpu->stat.deliver_restart_signal++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
+
+	rc = write_guest_lc(vcpu,
+			    offsetof(struct _lowcore, restart_old_psw),
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
+{
+	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
+	vcpu->stat.deliver_stop_signal++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
+					 0, 0);
+
+	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+	clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
+	return 0;
+}
+
+static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_prefix_info prefix;
+
+	spin_lock(&li->lock);
+	prefix = li->irq.prefix;
+	li->irq.prefix.address = 0;
+	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
+	spin_unlock(&li->lock);
+
+	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
+	vcpu->stat.deliver_prefix_signal++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+					 KVM_S390_SIGP_SET_PREFIX,
+					 prefix.address, 0);
+
+	kvm_s390_set_prefix(vcpu, prefix.address);
+	return 0;
+}
+
+static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	int rc;
+	int cpu_addr;
+
+	spin_lock(&li->lock);
+	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
+	clear_bit(cpu_addr, li->sigp_emerg_pending);
+	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
+		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+	spin_unlock(&li->lock);
+
+	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+	vcpu->stat.deliver_emergency_signal++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+					 cpu_addr, 0);
+
+	rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
+			  (u16 *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_extcall_info extcall;
+	int rc;
+
+	spin_lock(&li->lock);
+	extcall = li->irq.extcall;
+	li->irq.extcall.code = 0;
+	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+	spin_unlock(&li->lock);
+
+	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
+	vcpu->stat.deliver_external_call++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+					 KVM_S390_INT_EXTERNAL_CALL,
+					 extcall.code, 0);
+
+	rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
+			  (u16 *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
+			    sizeof(psw_t));
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_pgm_info pgm_info;
 	int rc = 0;
 	u16 ilc = get_ilc(vcpu);
 
-	switch (pgm_info->code & ~PGM_PER) {
+	spin_lock(&li->lock);
+	pgm_info = li->irq.pgm;
+	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
+	memset(&li->irq.pgm, 0, sizeof(pgm_info));
+	spin_unlock(&li->lock);
+
+	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+		   pgm_info.code, ilc);
+	vcpu->stat.deliver_program_int++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
+					 pgm_info.code, 0);
+
+	switch (pgm_info.code & ~PGM_PER) {
 	case PGM_AFX_TRANSLATION:
 	case PGM_ASX_TRANSLATION:
 	case PGM_EX_TRANSLATION:
@@ -243,7 +511,7 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
 	case PGM_PRIMARY_AUTHORITY:
 	case PGM_SECONDARY_AUTHORITY:
 	case PGM_SPACE_SWITCH:
-		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
 				  (u64 *)__LC_TRANS_EXC_CODE);
 		break;
 	case PGM_ALEN_TRANSLATION:
@@ -252,7 +520,7 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
 	case PGM_ASTE_SEQUENCE:
 	case PGM_ASTE_VALIDITY:
 	case PGM_EXTENDED_AUTHORITY:
-		rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
+		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
 				  (u8 *)__LC_EXC_ACCESS_ID);
 		break;
 	case PGM_ASCE_TYPE:
@@ -261,247 +529,208 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
 	case PGM_REGION_SECOND_TRANS:
 	case PGM_REGION_THIRD_TRANS:
 	case PGM_SEGMENT_TRANSLATION:
-		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
 				  (u64 *)__LC_TRANS_EXC_CODE);
-		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
+		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
 				   (u8 *)__LC_EXC_ACCESS_ID);
-		rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
+		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
 				   (u8 *)__LC_OP_ACCESS_ID);
 		break;
 	case PGM_MONITOR:
-		rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
-				  (u64 *)__LC_MON_CLASS_NR);
-		rc |= put_guest_lc(vcpu, pgm_info->mon_code,
+		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
+				  (u16 *)__LC_MON_CLASS_NR);
+		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
 				   (u64 *)__LC_MON_CODE);
 		break;
 	case PGM_DATA:
-		rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
+		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
 				  (u32 *)__LC_DATA_EXC_CODE);
 		break;
 	case PGM_PROTECTION:
-		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
 				  (u64 *)__LC_TRANS_EXC_CODE);
-		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
+		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
 				   (u8 *)__LC_EXC_ACCESS_ID);
 		break;
 	}
 
-	if (pgm_info->code & PGM_PER) {
-		rc |= put_guest_lc(vcpu, pgm_info->per_code,
+	if (pgm_info.code & PGM_PER) {
+		rc |= put_guest_lc(vcpu, pgm_info.per_code,
 				   (u8 *) __LC_PER_CODE);
-		rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
+		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
 				   (u8 *)__LC_PER_ATMID);
-		rc |= put_guest_lc(vcpu, pgm_info->per_address,
+		rc |= put_guest_lc(vcpu, pgm_info.per_address,
 				   (u64 *) __LC_PER_ADDRESS);
-		rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
+		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
 				   (u8 *) __LC_PER_ACCESS_ID);
 	}
 
 	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
-	rc |= put_guest_lc(vcpu, pgm_info->code,
+	rc |= put_guest_lc(vcpu, pgm_info.code,
 			   (u16 *)__LC_PGM_INT_CODE);
 	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	return rc ? -EFAULT : 0;
+}
 
-	return rc;
+static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
+					  struct kvm_s390_interrupt_info *inti)
+{
+	int rc;
+
+	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+		   inti->ext.ext_params);
+	vcpu->stat.deliver_service_signal++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+					 inti->ext.ext_params, 0);
+
+	rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+			   (u32 *)__LC_EXT_PARAMS);
+	return rc ? -EFAULT : 0;
 }
 
-static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
-					       struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
+					      struct kvm_s390_interrupt_info *inti)
 {
-	const unsigned short table[] = { 2, 4, 4, 6 };
-	int rc = 0;
+	int rc;
+
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+					 KVM_S390_INT_PFAULT_DONE, 0,
+					 inti->ext.ext_params2);
+
+	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+			   (u64 *)__LC_EXT_PARAMS2);
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
+					 struct kvm_s390_interrupt_info *inti)
+{
+	int rc;
+
+	VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
+		   inti->ext.ext_params, inti->ext.ext_params2);
+	vcpu->stat.deliver_virtio_interrupt++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+					 inti->ext.ext_params,
+					 inti->ext.ext_params2);
+
+	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+			   (u32 *)__LC_EXT_PARAMS);
+	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+			   (u64 *)__LC_EXT_PARAMS2);
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
+				     struct kvm_s390_interrupt_info *inti)
+{
+	int rc;
+
+	VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+	vcpu->stat.deliver_io_int++;
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+					 ((__u32)inti->io.subchannel_id << 16) |
+					 inti->io.subchannel_nr,
+					 ((__u64)inti->io.io_int_parm << 32) |
+					 inti->io.io_int_word);
+
+	rc = put_guest_lc(vcpu, inti->io.subchannel_id,
+			  (u16 *)__LC_SUBCHANNEL_ID);
+	rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
+			   (u16 *)__LC_SUBCHANNEL_NR);
+	rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
+			   (u32 *)__LC_IO_INT_PARM);
+	rc |= put_guest_lc(vcpu, inti->io.io_int_word,
+			   (u32 *)__LC_IO_INT_WORD);
+	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
+					   struct kvm_s390_interrupt_info *inti)
+{
+	struct kvm_s390_mchk_info *mchk = &inti->mchk;
+	int rc;
+
+	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+		   mchk->mcic);
+	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
+					 mchk->cr14, mchk->mcic);
+
+	rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+	rc |= put_guest_lc(vcpu, mchk->mcic,
+			   (u64 __user *) __LC_MCCK_CODE);
+	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
+			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+			     &mchk->fixed_logout, sizeof(mchk->fixed_logout));
+	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	return rc ? -EFAULT : 0;
+}
+
+typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
+
+static const deliver_irq_t deliver_irq_funcs[] = {
+	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
+	[IRQ_PEND_PROG]           = __deliver_prog,
+	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
+	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
+	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
+	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
+	[IRQ_PEND_RESTART]        = __deliver_restart,
+	[IRQ_PEND_SIGP_STOP]      = __deliver_stop,
+	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
+	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
+};
+
+static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
+					   struct kvm_s390_interrupt_info *inti)
+{
+	int rc;
 
 	switch (inti->type) {
-	case KVM_S390_INT_EMERGENCY:
-		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
-		vcpu->stat.deliver_emergency_signal++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->emerg.code, 0);
-		rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
-		rc |= put_guest_lc(vcpu, inti->emerg.code,
-				   (u16 *)__LC_EXT_CPU_ADDR);
-		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		break;
-	case KVM_S390_INT_EXTERNAL_CALL:
-		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
-		vcpu->stat.deliver_external_call++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->extcall.code, 0);
-		rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
-		rc |= put_guest_lc(vcpu, inti->extcall.code,
-				   (u16 *)__LC_EXT_CPU_ADDR);
-		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw,
-				     sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw,
-				    sizeof(psw_t));
-		break;
-	case KVM_S390_INT_CLOCK_COMP:
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->ext.ext_params, 0);
-		rc = deliver_ckc_interrupt(vcpu);
-		break;
-	case KVM_S390_INT_CPU_TIMER:
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->ext.ext_params, 0);
-		rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
-				  (u16 *)__LC_EXT_INT_CODE);
-		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw,
-				     sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
-				   (u32 *)__LC_EXT_PARAMS);
-		break;
 	case KVM_S390_INT_SERVICE:
-		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
-			   inti->ext.ext_params);
-		vcpu->stat.deliver_service_signal++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->ext.ext_params, 0);
-		rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
-		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw,
-				     sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
-				   (u32 *)__LC_EXT_PARAMS);
-		break;
-	case KVM_S390_INT_PFAULT_INIT:
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
-						 inti->ext.ext_params2);
-		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
-				  (u16 *) __LC_EXT_INT_CODE);
-		rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
-		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
-				   (u64 *) __LC_EXT_PARAMS2);
+		rc = __deliver_service(vcpu, inti);
 		break;
 	case KVM_S390_INT_PFAULT_DONE:
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
-						 inti->ext.ext_params2);
-		rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
-		rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
-		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw,
-				     sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
-				   (u64 *)__LC_EXT_PARAMS2);
+		rc = __deliver_pfault_done(vcpu, inti);
 		break;
 	case KVM_S390_INT_VIRTIO:
-		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
-			   inti->ext.ext_params, inti->ext.ext_params2);
-		vcpu->stat.deliver_virtio_interrupt++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->ext.ext_params,
-						 inti->ext.ext_params2);
-		rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
-		rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
-		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw,
-				     sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
-				   (u32 *)__LC_EXT_PARAMS);
-		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
-				   (u64 *)__LC_EXT_PARAMS2);
-		break;
-	case KVM_S390_SIGP_STOP:
-		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
-		vcpu->stat.deliver_stop_signal++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 0, 0);
-		__set_intercept_indicator(vcpu, inti);
-		break;
-
-	case KVM_S390_SIGP_SET_PREFIX:
-		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
-			   inti->prefix.address);
-		vcpu->stat.deliver_prefix_signal++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->prefix.address, 0);
-		kvm_s390_set_prefix(vcpu, inti->prefix.address);
-		break;
-
-	case KVM_S390_RESTART:
-		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
-		vcpu->stat.deliver_restart_signal++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 0, 0);
-		rc = write_guest_lc(vcpu,
-				    offsetof(struct _lowcore, restart_old_psw),
-				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
-				    &vcpu->arch.sie_block->gpsw,
-				    sizeof(psw_t));
+		rc = __deliver_virtio(vcpu, inti);
 		break;
-	case KVM_S390_PROGRAM_INT:
-		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
-			   inti->pgm.code,
-			   table[vcpu->arch.sie_block->ipa >> 14]);
-		vcpu->stat.deliver_program_int++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->pgm.code, 0);
-		rc = __deliver_prog_irq(vcpu, &inti->pgm);
-		break;
-
 	case KVM_S390_MCHK:
-		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
-			   inti->mchk.mcic);
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 inti->mchk.cr14,
-						 inti->mchk.mcic);
-		rc = kvm_s390_vcpu_store_status(vcpu,
-						KVM_S390_STORE_STATUS_PREFIXED);
-		rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
-		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw,
-				     sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc = __deliver_mchk_floating(vcpu, inti);
 		break;
-
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-	{
-		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
-			inti->io.subchannel_nr;
-		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
-			inti->io.io_int_word;
-		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
-		vcpu->stat.deliver_io_int++;
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-						 param0, param1);
-		rc = put_guest_lc(vcpu, inti->io.subchannel_id,
-				  (u16 *)__LC_SUBCHANNEL_ID);
-		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
-				   (u16 *)__LC_SUBCHANNEL_NR);
-		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
-				   (u32 *)__LC_IO_INT_PARM);
-		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
-				   (u32 *)__LC_IO_INT_WORD);
-		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw,
-				     sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw,
-				    sizeof(psw_t));
+		rc = __deliver_io(vcpu, inti);
 		break;
-	}
 	default:
 		BUG();
 	}
@@ -509,19 +738,6 @@ static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 	return rc;
 }
 
-static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
-{
-	int rc;
-
-	rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
-	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-			    &vcpu->arch.sie_block->gpsw,
-			    sizeof(psw_t));
-	return rc;
-}
-
 /* Check whether SIGP interpretation facility has an external call pending */
 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
 {
@@ -538,20 +754,11 @@ int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
-	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
 	struct kvm_s390_interrupt_info *inti;
-	int rc = 0;
+	int rc;
 
-	if (atomic_read(&li->active)) {
-		spin_lock(&li->lock);
-		list_for_each_entry(inti, &li->list, list)
-			if (__interrupt_is_deliverable(vcpu, inti)) {
-				rc = 1;
-				break;
-			}
-		spin_unlock(&li->lock);
-	}
+	rc = !!deliverable_local_irqs(vcpu);
 
 	if ((!rc) && atomic_read(&fi->active)) {
 		spin_lock(&fi->lock);
@@ -643,18 +850,15 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_interrupt_info *n, *inti = NULL;
 
 	spin_lock(&li->lock);
-	list_for_each_entry_safe(inti, n, &li->list, list) {
-		list_del(&inti->list);
-		kfree(inti);
-	}
-	atomic_set(&li->active, 0);
+	li->pending_irqs = 0;
+	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
+	memset(&li->irq, 0, sizeof(li->irq));
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
 	atomic_clear_mask(SIGP_CTRL_C,
 			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
 }
@@ -664,34 +868,35 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
 	struct kvm_s390_interrupt_info *n, *inti = NULL;
+	deliver_irq_t func;
 	int deliver;
 	int rc = 0;
+	unsigned long irq_type;
+	unsigned long deliverable_irqs;
 
 	__reset_intercept_indicators(vcpu);
-	if (atomic_read(&li->active)) {
-		do {
-			deliver = 0;
-			spin_lock(&li->lock);
-			list_for_each_entry_safe(inti, n, &li->list, list) {
-				if (__interrupt_is_deliverable(vcpu, inti)) {
-					list_del(&inti->list);
-					deliver = 1;
-					break;
-				}
-				__set_intercept_indicator(vcpu, inti);
-			}
-			if (list_empty(&li->list))
-				atomic_set(&li->active, 0);
-			spin_unlock(&li->lock);
-			if (deliver) {
-				rc = __do_deliver_interrupt(vcpu, inti);
-				kfree(inti);
-			}
-		} while (!rc && deliver);
-	}
 
-	if (!rc && kvm_cpu_has_pending_timer(vcpu))
-		rc = deliver_ckc_interrupt(vcpu);
+	/* pending ckc conditions might have been invalidated */
+	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+	if (kvm_cpu_has_pending_timer(vcpu))
+		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+
+	do {
+		deliverable_irqs = deliverable_local_irqs(vcpu);
+		/* bits are in the order of interrupt priority */
+		irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
+		if (irq_type == IRQ_PEND_COUNT)
+			break;
+		func = deliver_irq_funcs[irq_type];
+		if (!func) {
+			WARN_ON_ONCE(func == NULL);
+			clear_bit(irq_type, &li->pending_irqs);
+			continue;
+		}
+		rc = func(vcpu);
+	} while (!rc && irq_type != IRQ_PEND_COUNT);
+
+	set_intercept_indicators_local(vcpu);
 
 	if (!rc && atomic_read(&fi->active)) {
 		do {
@@ -710,7 +915,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			atomic_set(&fi->active, 0);
 			spin_unlock(&fi->lock);
 			if (deliver) {
-				rc = __do_deliver_interrupt(vcpu, inti);
+				rc = __deliver_floating_interrupt(vcpu, inti);
 				kfree(inti);
 			}
 		} while (!rc && deliver);
@@ -719,23 +924,26 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	return rc;
 }
 
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_interrupt_info *inti;
 
-	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-	if (!inti)
-		return -ENOMEM;
+	li->irq.pgm = irq->u.pgm;
+	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
+	return 0;
+}
 
-	inti->type = KVM_S390_PROGRAM_INT;
-	inti->pgm.code = code;
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_irq irq;
 
 	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
+				   0, 1);
 	spin_lock(&li->lock);
-	list_add(&inti->list, &li->list);
-	atomic_set(&li->active, 1);
+	irq.u.pgm.code = code;
+	__inject_prog(vcpu, &irq);
 	BUG_ON(waitqueue_active(li->wq));
 	spin_unlock(&li->lock);
 	return 0;
@@ -745,27 +953,166 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 			     struct kvm_s390_pgm_info *pgm_info)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_interrupt_info *inti;
-
-	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-	if (!inti)
-		return -ENOMEM;
+	struct kvm_s390_irq irq;
+	int rc;
 
 	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
 		   pgm_info->code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
 				   pgm_info->code, 0, 1);
-
-	inti->type = KVM_S390_PROGRAM_INT;
-	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
 	spin_lock(&li->lock);
-	list_add(&inti->list, &li->list);
-	atomic_set(&li->active, 1);
+	irq.u.pgm = *pgm_info;
+	rc = __inject_prog(vcpu, &irq);
 	BUG_ON(waitqueue_active(li->wq));
 	spin_unlock(&li->lock);
+	return rc;
+}
+
+static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
+		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
+				   irq->u.ext.ext_params,
+				   irq->u.ext.ext_params2, 2);
+
+	li->irq.ext = irq->u.ext;
+	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
+	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
+int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
+
+	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+		   irq->u.extcall.code);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
+				   irq->u.extcall.code, 0, 2);
+
+	*extcall = irq->u.extcall;
+	set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	return 0;
+}
+
+static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
+
+	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+		   prefix->address);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
+				   prefix->address, 0, 2);
+
+	*prefix = irq->u.prefix;
+	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
+	return 0;
+}
+
+static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
+
+	li->action_bits |= ACTION_STOP_ON_STOP;
+	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+	return 0;
+}
+
+static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
+				 struct kvm_s390_irq *irq)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
+
+	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+	return 0;
+}
+
+static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
+				   struct kvm_s390_irq *irq)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_emerg_info *emerg = &li->irq.emerg;
+
+	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
+		   irq->u.emerg.code);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+				   emerg->code, 0, 2);
+
+	set_bit(emerg->code, li->sigp_emerg_pending);
+	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	return 0;
+}
+
+static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
+
+	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+		   mchk->mcic);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
+				   mchk->mcic, 2);
+
+	/*
+	 * Because repressible machine checks can be indicated along with
+	 * exigent machine checks (PoP, Chapter 11, Interruption action)
+	 * we need to combine cr14, mcic and external damage code.
+	 * Failing storage address and the logout area should not be or'ed
+	 * together, we just indicate the last occurrence of the corresponding
+	 * machine check
+	 */
+	mchk->cr14 |= irq->u.mchk.cr14;
+	mchk->mcic |= irq->u.mchk.mcic;
+	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
+	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
+	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
+	       sizeof(mchk->fixed_logout));
+	if (mchk->mcic & MCHK_EX_MASK)
+		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+	else if (mchk->mcic & MCHK_REP_MASK)
+		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+	return 0;
+}
+
+static int __inject_ckc(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
+				   0, 0, 2);
+
+	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	return 0;
+}
+
+static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
+				   0, 0, 2);
+
+	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	return 0;
+}
+
+
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 						    u64 cr6, u64 schid)
 {
@@ -851,7 +1198,17 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
 	li = &dst_vcpu->arch.local_int;
 	spin_lock(&li->lock);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	switch (inti->type) {
+	case KVM_S390_MCHK:
+		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		break;
+	default:
+		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		break;
+	}
 	spin_unlock(&li->lock);
 	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
@@ -920,92 +1277,85 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
920 __inject_vm(kvm, inti); 1277 __inject_vm(kvm, inti);
921} 1278}
922 1279
923int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, 1280int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
924 struct kvm_s390_interrupt *s390int) 1281 struct kvm_s390_irq *irq)
925{ 1282{
926 struct kvm_s390_local_interrupt *li; 1283 irq->type = s390int->type;
927 struct kvm_s390_interrupt_info *inti; 1284 switch (irq->type) {
1285 case KVM_S390_PROGRAM_INT:
1286 if (s390int->parm & 0xffff0000)
1287 return -EINVAL;
1288 irq->u.pgm.code = s390int->parm;
1289 break;
1290 case KVM_S390_SIGP_SET_PREFIX:
1291 irq->u.prefix.address = s390int->parm;
1292 break;
1293 case KVM_S390_INT_EXTERNAL_CALL:
1294 if (irq->u.extcall.code & 0xffff0000)
1295 return -EINVAL;
1296 irq->u.extcall.code = s390int->parm;
1297 break;
1298 case KVM_S390_INT_EMERGENCY:
1299 if (irq->u.emerg.code & 0xffff0000)
1300 return -EINVAL;
1301 irq->u.emerg.code = s390int->parm;
1302 break;
1303 case KVM_S390_MCHK:
1304 irq->u.mchk.mcic = s390int->parm64;
1305 break;
1306 }
1307 return 0;
1308}
928 1309
929 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1310int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
930 if (!inti) 1311{
931 return -ENOMEM; 1312 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1313 int rc;
932 1314
933 switch (s390int->type) { 1315 spin_lock(&li->lock);
1316 switch (irq->type) {
934 case KVM_S390_PROGRAM_INT: 1317 case KVM_S390_PROGRAM_INT:
935 if (s390int->parm & 0xffff0000) {
936 kfree(inti);
937 return -EINVAL;
938 }
939 inti->type = s390int->type;
940 inti->pgm.code = s390int->parm;
941 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", 1318 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
942 s390int->parm); 1319 irq->u.pgm.code);
1320 rc = __inject_prog(vcpu, irq);
943 break; 1321 break;
944 case KVM_S390_SIGP_SET_PREFIX: 1322 case KVM_S390_SIGP_SET_PREFIX:
945 inti->prefix.address = s390int->parm; 1323 rc = __inject_set_prefix(vcpu, irq);
946 inti->type = s390int->type;
947 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
948 s390int->parm);
949 break; 1324 break;
950 case KVM_S390_SIGP_STOP: 1325 case KVM_S390_SIGP_STOP:
1326 rc = __inject_sigp_stop(vcpu, irq);
1327 break;
951 case KVM_S390_RESTART: 1328 case KVM_S390_RESTART:
1329 rc = __inject_sigp_restart(vcpu, irq);
1330 break;
952 case KVM_S390_INT_CLOCK_COMP: 1331 case KVM_S390_INT_CLOCK_COMP:
1332 rc = __inject_ckc(vcpu);
1333 break;
953 case KVM_S390_INT_CPU_TIMER: 1334 case KVM_S390_INT_CPU_TIMER:
954 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); 1335 rc = __inject_cpu_timer(vcpu);
955 inti->type = s390int->type;
956 break; 1336 break;
957 case KVM_S390_INT_EXTERNAL_CALL: 1337 case KVM_S390_INT_EXTERNAL_CALL:
958 if (s390int->parm & 0xffff0000) { 1338 rc = __inject_extcall(vcpu, irq);
959 kfree(inti);
960 return -EINVAL;
961 }
962 VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
963 s390int->parm);
964 inti->type = s390int->type;
965 inti->extcall.code = s390int->parm;
966 break; 1339 break;
967 case KVM_S390_INT_EMERGENCY: 1340 case KVM_S390_INT_EMERGENCY:
968 if (s390int->parm & 0xffff0000) { 1341 rc = __inject_sigp_emergency(vcpu, irq);
969 kfree(inti);
970 return -EINVAL;
971 }
972 VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
973 inti->type = s390int->type;
974 inti->emerg.code = s390int->parm;
975 break; 1342 break;
976 case KVM_S390_MCHK: 1343 case KVM_S390_MCHK:
977 VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", 1344 rc = __inject_mchk(vcpu, irq);
978 s390int->parm64);
979 inti->type = s390int->type;
980 inti->mchk.mcic = s390int->parm64;
981 break; 1345 break;
982 case KVM_S390_INT_PFAULT_INIT: 1346 case KVM_S390_INT_PFAULT_INIT:
983 inti->type = s390int->type; 1347 rc = __inject_pfault_init(vcpu, irq);
984 inti->ext.ext_params2 = s390int->parm64;
985 break; 1348 break;
986 case KVM_S390_INT_VIRTIO: 1349 case KVM_S390_INT_VIRTIO:
987 case KVM_S390_INT_SERVICE: 1350 case KVM_S390_INT_SERVICE:
988 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1351 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
989 default: 1352 default:
990 kfree(inti); 1353 rc = -EINVAL;
991 return -EINVAL;
992 } 1354 }
993 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
994 s390int->parm64, 2);
995
996 li = &vcpu->arch.local_int;
997 spin_lock(&li->lock);
998 if (inti->type == KVM_S390_PROGRAM_INT)
999 list_add(&inti->list, &li->list);
1000 else
1001 list_add_tail(&inti->list, &li->list);
1002 atomic_set(&li->active, 1);
1003 if (inti->type == KVM_S390_SIGP_STOP)
1004 li->action_bits |= ACTION_STOP_ON_STOP;
1005 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1006 spin_unlock(&li->lock); 1355 spin_unlock(&li->lock);
1007 kvm_s390_vcpu_wakeup(vcpu); 1356 if (!rc)
1008 return 0; 1357 kvm_s390_vcpu_wakeup(vcpu);
1358 return rc;
1009} 1359}
1010 1360
1011void kvm_s390_clear_float_irqs(struct kvm *kvm) 1361void kvm_s390_clear_float_irqs(struct kvm *kvm)
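The hunk above replaces the open-coded kvm_s390_inject_vcpu() body with per-type __inject_*() helpers and adds s390int_to_s390irq() so the flat kvm_s390_interrupt layout (type/parm/parm64) can still be fed into the new per-type kvm_s390_irq union. The following user-space sketch only illustrates that mapping; the structures and enum tags are simplified stand-ins rather than the uapi definitions, and the 16-bit validity check is applied uniformly to the incoming parm here.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the uapi structures; layouts are illustrative. */
struct s390_interrupt {
	uint32_t type;
	uint32_t parm;
	uint64_t parm64;
};

struct s390_irq {
	uint64_t type;
	union {
		struct { uint16_t code; } pgm;		/* program interrupt */
		struct { uint32_t address; } prefix;	/* SIGP set prefix */
		struct { uint16_t code; } extcall;	/* external call */
		struct { uint16_t code; } emerg;	/* emergency signal */
		struct { uint64_t mcic; } mchk;		/* machine check */
	} u;
};

/* Hypothetical type tags; the real KVM_S390_* constants live in <linux/kvm.h>. */
enum irq_type { T_PROGRAM, T_SET_PREFIX, T_EXTCALL, T_EMERGENCY, T_MCHK };

/* Mirrors the mapping done by s390int_to_s390irq() in the hunk above. */
static int to_irq(const struct s390_interrupt *in, struct s390_irq *out)
{
	out->type = in->type;
	switch (in->type) {
	case T_PROGRAM:
	case T_EXTCALL:
	case T_EMERGENCY:
		if (in->parm & 0xffff0000)	/* these codes are 16 bits wide */
			return -1;
		break;
	default:
		break;
	}

	switch (in->type) {
	case T_PROGRAM:		out->u.pgm.code = in->parm; break;
	case T_SET_PREFIX:	out->u.prefix.address = in->parm; break;
	case T_EXTCALL:		out->u.extcall.code = in->parm; break;
	case T_EMERGENCY:	out->u.emerg.code = in->parm; break;
	case T_MCHK:		out->u.mchk.mcic = in->parm64; break;
	}
	return 0;
}

int main(void)
{
	struct s390_interrupt legacy = { .type = T_EMERGENCY, .parm = 3 };
	struct s390_irq irq;

	if (to_irq(&legacy, &irq) == 0)
		printf("emergency signal from cpu %u\n", (unsigned)irq.u.emerg.code);
	return 0;
}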
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6b049ee75a56..3e09801e3104 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -81,10 +81,17 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
81 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) }, 81 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
82 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) }, 82 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
83 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, 83 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
84 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
85 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
84 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, 86 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
87 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
88 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
85 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, 89 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
86 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, 90 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
87 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, 91 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
92 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
93 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
94 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
88 { "diagnose_10", VCPU_STAT(diagnose_10) }, 95 { "diagnose_10", VCPU_STAT(diagnose_10) },
89 { "diagnose_44", VCPU_STAT(diagnose_44) }, 96 { "diagnose_44", VCPU_STAT(diagnose_44) },
90 { "diagnose_9c", VCPU_STAT(diagnose_9c) }, 97 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
@@ -453,6 +460,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
453 spin_lock_init(&kvm->arch.float_int.lock); 460 spin_lock_init(&kvm->arch.float_int.lock);
454 INIT_LIST_HEAD(&kvm->arch.float_int.list); 461 INIT_LIST_HEAD(&kvm->arch.float_int.list);
455 init_waitqueue_head(&kvm->arch.ipte_wq); 462 init_waitqueue_head(&kvm->arch.ipte_wq);
463 mutex_init(&kvm->arch.ipte_mutex);
456 464
457 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); 465 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
458 VM_EVENT(kvm, 3, "%s", "vm created"); 466 VM_EVENT(kvm, 3, "%s", "vm created");
@@ -711,7 +719,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
711 } 719 }
712 720
713 spin_lock_init(&vcpu->arch.local_int.lock); 721 spin_lock_init(&vcpu->arch.local_int.lock);
714 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
715 vcpu->arch.local_int.float_int = &kvm->arch.float_int; 722 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
716 vcpu->arch.local_int.wq = &vcpu->wq; 723 vcpu->arch.local_int.wq = &vcpu->wq;
717 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; 724 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
@@ -1114,13 +1121,15 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1114 unsigned long token) 1121 unsigned long token)
1115{ 1122{
1116 struct kvm_s390_interrupt inti; 1123 struct kvm_s390_interrupt inti;
1117 inti.parm64 = token; 1124 struct kvm_s390_irq irq;
1118 1125
1119 if (start_token) { 1126 if (start_token) {
1120 inti.type = KVM_S390_INT_PFAULT_INIT; 1127 irq.u.ext.ext_params2 = token;
1121 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti)); 1128 irq.type = KVM_S390_INT_PFAULT_INIT;
1129 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
1122 } else { 1130 } else {
1123 inti.type = KVM_S390_INT_PFAULT_DONE; 1131 inti.type = KVM_S390_INT_PFAULT_DONE;
1132 inti.parm64 = token;
1124 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); 1133 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1125 } 1134 }
1126} 1135}
@@ -1614,11 +1623,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1614 switch (ioctl) { 1623 switch (ioctl) {
1615 case KVM_S390_INTERRUPT: { 1624 case KVM_S390_INTERRUPT: {
1616 struct kvm_s390_interrupt s390int; 1625 struct kvm_s390_interrupt s390int;
1626 struct kvm_s390_irq s390irq;
1617 1627
1618 r = -EFAULT; 1628 r = -EFAULT;
1619 if (copy_from_user(&s390int, argp, sizeof(s390int))) 1629 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1620 break; 1630 break;
1621 r = kvm_s390_inject_vcpu(vcpu, &s390int); 1631 if (s390int_to_s390irq(&s390int, &s390irq))
1632 return -EINVAL;
1633 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
1622 break; 1634 break;
1623 } 1635 }
1624 case KVM_S390_STORE_STATUS: 1636 case KVM_S390_STORE_STATUS:
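As the ioctl hunk above shows, user space keeps issuing KVM_S390_INTERRUPT with a struct kvm_s390_interrupt; the conversion to struct kvm_s390_irq now happens inside kvm_arch_vcpu_ioctl(). A minimal sketch of a caller, assuming vcpu_fd was returned by a prior KVM_CREATE_VCPU ioctl and with error handling reduced to a perror():

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

int inject_emergency(int vcpu_fd, unsigned int src_cpu)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_EMERGENCY,
		.parm = src_cpu,	/* must fit into 16 bits, see the check above */
	};

	if (ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int) < 0) {
		perror("KVM_S390_INTERRUPT");
		return -1;
	}
	return 0;
}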
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 244d02303182..a8f3d9b71c11 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -24,8 +24,6 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
24/* declare vfacilities extern */ 24/* declare vfacilities extern */
25extern unsigned long *vfacilities; 25extern unsigned long *vfacilities;
26 26
27int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
28
29/* Transactional Memory Execution related macros */ 27/* Transactional Memory Execution related macros */
30#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) 28#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
31#define TDB_FORMAT1 1 29#define TDB_FORMAT1 1
@@ -144,7 +142,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm);
144int __must_check kvm_s390_inject_vm(struct kvm *kvm, 142int __must_check kvm_s390_inject_vm(struct kvm *kvm,
145 struct kvm_s390_interrupt *s390int); 143 struct kvm_s390_interrupt *s390int);
146int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, 144int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
147 struct kvm_s390_interrupt *s390int); 145 struct kvm_s390_irq *irq);
148int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); 146int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
149struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, 147struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
150 u64 cr6, u64 schid); 148 u64 cr6, u64 schid);
@@ -152,6 +150,10 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
152 struct kvm_s390_interrupt_info *inti); 150 struct kvm_s390_interrupt_info *inti);
153int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); 151int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
154 152
153/* implemented in intercept.c */
154void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc);
155int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
156
155/* implemented in priv.c */ 157/* implemented in priv.c */
156int is_valid_psw(psw_t *psw); 158int is_valid_psw(psw_t *psw);
157int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); 159int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
@@ -222,6 +224,9 @@ static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
222 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 224 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
223} 225}
224 226
227int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
228 struct kvm_s390_irq *s390irq);
229
225/* implemented in interrupt.c */ 230/* implemented in interrupt.c */
226int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); 231int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
227int psw_extint_disabled(struct kvm_vcpu *vcpu); 232int psw_extint_disabled(struct kvm_vcpu *vcpu);
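The header now exports kvm_s390_rewind_psw(), implemented in intercept.c and used by the priv.c handlers below to make the guest re-execute an instruction. The standalone sketch that follows only illustrates the arithmetic: step the instruction address back by the instruction length and wrap within the current addressing mode. The mask bit positions and types are illustrative stand-ins, not the architected PSW layout used by the real helper on vcpu->arch.sie_block->gpsw.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PSW_EA	(1ULL << 1)	/* "extended addressing" stand-in bit */
#define SKETCH_PSW_BA	(1ULL << 0)	/* "basic addressing" stand-in bit */

struct sketch_psw {
	uint64_t mask;
	uint64_t addr;
};

static uint64_t rewind_addr(struct sketch_psw psw, unsigned int ilc)
{
	uint64_t wrap;

	if (psw.mask & SKETCH_PSW_EA)
		wrap = ~0ULL;			/* 64-bit mode */
	else if (psw.mask & SKETCH_PSW_BA)
		wrap = (1ULL << 31) - 1;	/* 31-bit mode */
	else
		wrap = (1ULL << 24) - 1;	/* 24-bit mode */

	return (psw.addr - ilc) & wrap;
}

int main(void)
{
	struct sketch_psw psw = { .mask = SKETCH_PSW_EA, .addr = 0x20004 };

	/* re-execute a 4-byte instruction: 0x20004 -> 0x20000 */
	printf("%#llx\n", (unsigned long long)rewind_addr(psw, 4));
	return 0;
}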
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index f47cb0c6d906..1be578d64dfc 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -180,21 +180,18 @@ static int handle_skey(struct kvm_vcpu *vcpu)
180 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 180 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
181 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 181 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
182 182
183 vcpu->arch.sie_block->gpsw.addr = 183 kvm_s390_rewind_psw(vcpu, 4);
184 __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
185 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); 184 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
186 return 0; 185 return 0;
187} 186}
188 187
189static int handle_ipte_interlock(struct kvm_vcpu *vcpu) 188static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
190{ 189{
191 psw_t *psw = &vcpu->arch.sie_block->gpsw;
192
193 vcpu->stat.instruction_ipte_interlock++; 190 vcpu->stat.instruction_ipte_interlock++;
194 if (psw_bits(*psw).p) 191 if (psw_bits(vcpu->arch.sie_block->gpsw).p)
195 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 192 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
196 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); 193 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
197 psw->addr = __rewind_psw(*psw, 4); 194 kvm_s390_rewind_psw(vcpu, 4);
198 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); 195 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
199 return 0; 196 return 0;
200} 197}
@@ -650,10 +647,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
650 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 647 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
651 648
652 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 649 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
653 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 650 start = kvm_s390_logical_to_effective(vcpu, start);
654 if (kvm_s390_check_low_addr_protection(vcpu, start))
655 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
656 }
657 651
658 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 652 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
659 case 0x00000000: 653 case 0x00000000:
@@ -669,6 +663,12 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
669 default: 663 default:
670 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 664 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
671 } 665 }
666
667 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
668 if (kvm_s390_check_low_addr_protection(vcpu, start))
669 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
670 }
671
672 while (start < end) { 672 while (start < end) {
673 unsigned long useraddr, abs_addr; 673 unsigned long useraddr, abs_addr;
674 674
@@ -725,8 +725,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
725 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 725 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
726 726
727 /* Rewind PSW to repeat the ESSA instruction */ 727 /* Rewind PSW to repeat the ESSA instruction */
728 vcpu->arch.sie_block->gpsw.addr = 728 kvm_s390_rewind_psw(vcpu, 4);
729 __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
730 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ 729 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
731 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); 730 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
732 down_read(&gmap->mm->mmap_sem); 731 down_read(&gmap->mm->mmap_sem);
@@ -769,8 +768,8 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
769{ 768{
770 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 769 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
771 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 770 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
772 u32 val = 0; 771 int reg, rc, nr_regs;
773 int reg, rc; 772 u32 ctl_array[16];
774 u64 ga; 773 u64 ga;
775 774
776 vcpu->stat.instruction_lctl++; 775 vcpu->stat.instruction_lctl++;
@@ -786,19 +785,20 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
786 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 785 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
787 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); 786 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
788 787
788 nr_regs = ((reg3 - reg1) & 0xf) + 1;
789 rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
790 if (rc)
791 return kvm_s390_inject_prog_cond(vcpu, rc);
789 reg = reg1; 792 reg = reg1;
793 nr_regs = 0;
790 do { 794 do {
791 rc = read_guest(vcpu, ga, &val, sizeof(val));
792 if (rc)
793 return kvm_s390_inject_prog_cond(vcpu, rc);
794 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; 795 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
795 vcpu->arch.sie_block->gcr[reg] |= val; 796 vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
796 ga += 4;
797 if (reg == reg3) 797 if (reg == reg3)
798 break; 798 break;
799 reg = (reg + 1) % 16; 799 reg = (reg + 1) % 16;
800 } while (1); 800 } while (1);
801 801 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
802 return 0; 802 return 0;
803} 803}
804 804
@@ -806,9 +806,9 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
806{ 806{
807 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 807 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
808 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 808 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
809 int reg, rc, nr_regs;
810 u32 ctl_array[16];
809 u64 ga; 811 u64 ga;
810 u32 val;
811 int reg, rc;
812 812
813 vcpu->stat.instruction_stctl++; 813 vcpu->stat.instruction_stctl++;
814 814
@@ -824,26 +824,24 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
824 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); 824 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
825 825
826 reg = reg1; 826 reg = reg1;
827 nr_regs = 0;
827 do { 828 do {
828 val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful; 829 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
829 rc = write_guest(vcpu, ga, &val, sizeof(val));
830 if (rc)
831 return kvm_s390_inject_prog_cond(vcpu, rc);
832 ga += 4;
833 if (reg == reg3) 830 if (reg == reg3)
834 break; 831 break;
835 reg = (reg + 1) % 16; 832 reg = (reg + 1) % 16;
836 } while (1); 833 } while (1);
837 834 rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
838 return 0; 835 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
839} 836}
840 837
841static int handle_lctlg(struct kvm_vcpu *vcpu) 838static int handle_lctlg(struct kvm_vcpu *vcpu)
842{ 839{
843 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 840 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
844 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 841 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
845 u64 ga, val; 842 int reg, rc, nr_regs;
846 int reg, rc; 843 u64 ctl_array[16];
844 u64 ga;
847 845
848 vcpu->stat.instruction_lctlg++; 846 vcpu->stat.instruction_lctlg++;
849 847
@@ -855,22 +853,22 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
855 if (ga & 7) 853 if (ga & 7)
856 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 854 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
857 855
858 reg = reg1;
859
860 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 856 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
861 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); 857 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
862 858
859 nr_regs = ((reg3 - reg1) & 0xf) + 1;
860 rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
861 if (rc)
862 return kvm_s390_inject_prog_cond(vcpu, rc);
863 reg = reg1;
864 nr_regs = 0;
863 do { 865 do {
864 rc = read_guest(vcpu, ga, &val, sizeof(val)); 866 vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
865 if (rc)
866 return kvm_s390_inject_prog_cond(vcpu, rc);
867 vcpu->arch.sie_block->gcr[reg] = val;
868 ga += 8;
869 if (reg == reg3) 867 if (reg == reg3)
870 break; 868 break;
871 reg = (reg + 1) % 16; 869 reg = (reg + 1) % 16;
872 } while (1); 870 } while (1);
873 871 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
874 return 0; 872 return 0;
875} 873}
876 874
@@ -878,8 +876,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
878{ 876{
879 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 877 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
880 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 878 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
881 u64 ga, val; 879 int reg, rc, nr_regs;
882 int reg, rc; 880 u64 ctl_array[16];
881 u64 ga;
883 882
884 vcpu->stat.instruction_stctg++; 883 vcpu->stat.instruction_stctg++;
885 884
@@ -891,23 +890,19 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
891 if (ga & 7) 890 if (ga & 7)
892 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 891 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
893 892
894 reg = reg1;
895
896 VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 893 VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
897 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); 894 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
898 895
896 reg = reg1;
897 nr_regs = 0;
899 do { 898 do {
900 val = vcpu->arch.sie_block->gcr[reg]; 899 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
901 rc = write_guest(vcpu, ga, &val, sizeof(val));
902 if (rc)
903 return kvm_s390_inject_prog_cond(vcpu, rc);
904 ga += 8;
905 if (reg == reg3) 900 if (reg == reg3)
906 break; 901 break;
907 reg = (reg + 1) % 16; 902 reg = (reg + 1) % 16;
908 } while (1); 903 } while (1);
909 904 rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
910 return 0; 905 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
911} 906}
912 907
913static const intercept_handler_t eb_handlers[256] = { 908static const intercept_handler_t eb_handlers[256] = {
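The LCTL/STCTL/LCTLG/STCTG rework above trades one guest access per control register for a single read_guest()/write_guest() of the whole, possibly wrapping, r1..r3 range. Below is a standalone sketch of the wrapping-range logic only, with a plain gcr[] array standing in for vcpu->arch.sie_block->gcr and the guest access left out.

#include <stdint.h>
#include <stdio.h>

static uint64_t gcr[16];

static void load_ctl_regs(int reg1, int reg3, const uint32_t *ctl_array)
{
	/* number of registers covered by the wrapping range r1..r3 */
	int nr_regs = ((reg3 - reg1) & 0xf) + 1;
	int reg = reg1;

	/* in the kernel, nr_regs sizes a single read_guest() of ctl_array */
	for (int i = 0; i < nr_regs; i++) {
		gcr[reg] &= 0xffffffff00000000ULL;	/* LCTL replaces the low word only */
		gcr[reg] |= ctl_array[i];
		reg = (reg + 1) % 16;
	}
}

int main(void)
{
	/* r1=14, r3=2 wraps around: cr14, cr15, cr0, cr1, cr2 */
	uint32_t vals[5] = { 1, 2, 3, 4, 5 };

	load_ctl_regs(14, 2, vals);
	printf("cr15 = %llx, cr2 = %llx\n",
	       (unsigned long long)gcr[15], (unsigned long long)gcr[2]);
	return 0;
}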
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index cf243ba3d50f..6651f9f73973 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -20,20 +20,13 @@
20#include "kvm-s390.h" 20#include "kvm-s390.h"
21#include "trace.h" 21#include "trace.h"
22 22
23static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, 23static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
24 u64 *reg) 24 u64 *reg)
25{ 25{
26 struct kvm_s390_local_interrupt *li; 26 struct kvm_s390_local_interrupt *li;
27 struct kvm_vcpu *dst_vcpu = NULL;
28 int cpuflags; 27 int cpuflags;
29 int rc; 28 int rc;
30 29
31 if (cpu_addr >= KVM_MAX_VCPUS)
32 return SIGP_CC_NOT_OPERATIONAL;
33
34 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
35 if (!dst_vcpu)
36 return SIGP_CC_NOT_OPERATIONAL;
37 li = &dst_vcpu->arch.local_int; 30 li = &dst_vcpu->arch.local_int;
38 31
39 cpuflags = atomic_read(li->cpuflags); 32 cpuflags = atomic_read(li->cpuflags);
@@ -48,55 +41,53 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
48 rc = SIGP_CC_STATUS_STORED; 41 rc = SIGP_CC_STATUS_STORED;
49 } 42 }
50 43
51 VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc); 44 VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
45 rc);
52 return rc; 46 return rc;
53} 47}
54 48
55static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) 49static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
50 struct kvm_vcpu *dst_vcpu)
56{ 51{
57 struct kvm_s390_interrupt s390int = { 52 struct kvm_s390_irq irq = {
58 .type = KVM_S390_INT_EMERGENCY, 53 .type = KVM_S390_INT_EMERGENCY,
59 .parm = vcpu->vcpu_id, 54 .u.emerg.code = vcpu->vcpu_id,
60 }; 55 };
61 struct kvm_vcpu *dst_vcpu = NULL;
62 int rc = 0; 56 int rc = 0;
63 57
64 if (cpu_addr < KVM_MAX_VCPUS) 58 rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
65 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
66 if (!dst_vcpu)
67 return SIGP_CC_NOT_OPERATIONAL;
68
69 rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
70 if (!rc) 59 if (!rc)
71 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); 60 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
61 dst_vcpu->vcpu_id);
72 62
73 return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; 63 return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
74} 64}
75 65
76static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, 66static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
67{
68 return __inject_sigp_emergency(vcpu, dst_vcpu);
69}
70
71static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
72 struct kvm_vcpu *dst_vcpu,
77 u16 asn, u64 *reg) 73 u16 asn, u64 *reg)
78{ 74{
79 struct kvm_vcpu *dst_vcpu = NULL;
80 const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT; 75 const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
81 u16 p_asn, s_asn; 76 u16 p_asn, s_asn;
82 psw_t *psw; 77 psw_t *psw;
83 u32 flags; 78 u32 flags;
84 79
85 if (cpu_addr < KVM_MAX_VCPUS)
86 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
87 if (!dst_vcpu)
88 return SIGP_CC_NOT_OPERATIONAL;
89 flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags); 80 flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
90 psw = &dst_vcpu->arch.sie_block->gpsw; 81 psw = &dst_vcpu->arch.sie_block->gpsw;
91 p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ 82 p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
92 s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */ 83 s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */
93 84
94 /* Deliver the emergency signal? */ 85 /* Inject the emergency signal? */
95 if (!(flags & CPUSTAT_STOPPED) 86 if (!(flags & CPUSTAT_STOPPED)
96 || (psw->mask & psw_int_mask) != psw_int_mask 87 || (psw->mask & psw_int_mask) != psw_int_mask
97 || ((flags & CPUSTAT_WAIT) && psw->addr != 0) 88 || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
98 || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) { 89 || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
99 return __sigp_emergency(vcpu, cpu_addr); 90 return __inject_sigp_emergency(vcpu, dst_vcpu);
100 } else { 91 } else {
101 *reg &= 0xffffffff00000000UL; 92 *reg &= 0xffffffff00000000UL;
102 *reg |= SIGP_STATUS_INCORRECT_STATE; 93 *reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -104,23 +95,19 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
104 } 95 }
105} 96}
106 97
107static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) 98static int __sigp_external_call(struct kvm_vcpu *vcpu,
99 struct kvm_vcpu *dst_vcpu)
108{ 100{
109 struct kvm_s390_interrupt s390int = { 101 struct kvm_s390_irq irq = {
110 .type = KVM_S390_INT_EXTERNAL_CALL, 102 .type = KVM_S390_INT_EXTERNAL_CALL,
111 .parm = vcpu->vcpu_id, 103 .u.extcall.code = vcpu->vcpu_id,
112 }; 104 };
113 struct kvm_vcpu *dst_vcpu = NULL;
114 int rc; 105 int rc;
115 106
116 if (cpu_addr < KVM_MAX_VCPUS) 107 rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
117 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
118 if (!dst_vcpu)
119 return SIGP_CC_NOT_OPERATIONAL;
120
121 rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
122 if (!rc) 108 if (!rc)
123 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); 109 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
110 dst_vcpu->vcpu_id);
124 111
125 return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; 112 return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
126} 113}
@@ -128,29 +115,20 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
128static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action) 115static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
129{ 116{
130 struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; 117 struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
131 struct kvm_s390_interrupt_info *inti;
132 int rc = SIGP_CC_ORDER_CODE_ACCEPTED; 118 int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
133 119
134 inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
135 if (!inti)
136 return -ENOMEM;
137 inti->type = KVM_S390_SIGP_STOP;
138
139 spin_lock(&li->lock); 120 spin_lock(&li->lock);
140 if (li->action_bits & ACTION_STOP_ON_STOP) { 121 if (li->action_bits & ACTION_STOP_ON_STOP) {
141 /* another SIGP STOP is pending */ 122 /* another SIGP STOP is pending */
142 kfree(inti);
143 rc = SIGP_CC_BUSY; 123 rc = SIGP_CC_BUSY;
144 goto out; 124 goto out;
145 } 125 }
146 if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { 126 if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
147 kfree(inti);
148 if ((action & ACTION_STORE_ON_STOP) != 0) 127 if ((action & ACTION_STORE_ON_STOP) != 0)
149 rc = -ESHUTDOWN; 128 rc = -ESHUTDOWN;
150 goto out; 129 goto out;
151 } 130 }
152 list_add_tail(&inti->list, &li->list); 131 set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
153 atomic_set(&li->active, 1);
154 li->action_bits |= action; 132 li->action_bits |= action;
155 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); 133 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
156 kvm_s390_vcpu_wakeup(dst_vcpu); 134 kvm_s390_vcpu_wakeup(dst_vcpu);
@@ -160,23 +138,27 @@ out:
160 return rc; 138 return rc;
161} 139}
162 140
163static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) 141static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
164{ 142{
165 struct kvm_vcpu *dst_vcpu = NULL;
166 int rc; 143 int rc;
167 144
168 if (cpu_addr >= KVM_MAX_VCPUS) 145 rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
169 return SIGP_CC_NOT_OPERATIONAL; 146 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
170 147
171 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); 148 return rc;
172 if (!dst_vcpu) 149}
173 return SIGP_CC_NOT_OPERATIONAL;
174 150
175 rc = __inject_sigp_stop(dst_vcpu, action); 151static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
152 struct kvm_vcpu *dst_vcpu, u64 *reg)
153{
154 int rc;
176 155
177 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); 156 rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
157 ACTION_STORE_ON_STOP);
158 VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
159 dst_vcpu->vcpu_id);
178 160
179 if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) { 161 if (rc == -ESHUTDOWN) {
180 /* If the CPU has already been stopped, we still have 162 /* If the CPU has already been stopped, we still have
181 * to save the status when doing stop-and-store. This 163 * to save the status when doing stop-and-store. This
182 * has to be done after unlocking all spinlocks. */ 164 * has to be done after unlocking all spinlocks. */
@@ -212,18 +194,12 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
212 return rc; 194 return rc;
213} 195}
214 196
215static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, 197static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
216 u64 *reg) 198 u32 address, u64 *reg)
217{ 199{
218 struct kvm_s390_local_interrupt *li; 200 struct kvm_s390_local_interrupt *li;
219 struct kvm_vcpu *dst_vcpu = NULL;
220 struct kvm_s390_interrupt_info *inti;
221 int rc; 201 int rc;
222 202
223 if (cpu_addr < KVM_MAX_VCPUS)
224 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
225 if (!dst_vcpu)
226 return SIGP_CC_NOT_OPERATIONAL;
227 li = &dst_vcpu->arch.local_int; 203 li = &dst_vcpu->arch.local_int;
228 204
229 /* 205 /*
@@ -238,46 +214,34 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
238 return SIGP_CC_STATUS_STORED; 214 return SIGP_CC_STATUS_STORED;
239 } 215 }
240 216
241 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
242 if (!inti)
243 return SIGP_CC_BUSY;
244
245 spin_lock(&li->lock); 217 spin_lock(&li->lock);
246 /* cpu must be in stopped state */ 218 /* cpu must be in stopped state */
247 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { 219 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
248 *reg &= 0xffffffff00000000UL; 220 *reg &= 0xffffffff00000000UL;
249 *reg |= SIGP_STATUS_INCORRECT_STATE; 221 *reg |= SIGP_STATUS_INCORRECT_STATE;
250 rc = SIGP_CC_STATUS_STORED; 222 rc = SIGP_CC_STATUS_STORED;
251 kfree(inti);
252 goto out_li; 223 goto out_li;
253 } 224 }
254 225
255 inti->type = KVM_S390_SIGP_SET_PREFIX; 226 li->irq.prefix.address = address;
256 inti->prefix.address = address; 227 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
257
258 list_add_tail(&inti->list, &li->list);
259 atomic_set(&li->active, 1);
260 kvm_s390_vcpu_wakeup(dst_vcpu); 228 kvm_s390_vcpu_wakeup(dst_vcpu);
261 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 229 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
262 230
263 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); 231 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
232 address);
264out_li: 233out_li:
265 spin_unlock(&li->lock); 234 spin_unlock(&li->lock);
266 return rc; 235 return rc;
267} 236}
268 237
269static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, 238static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
270 u32 addr, u64 *reg) 239 struct kvm_vcpu *dst_vcpu,
240 u32 addr, u64 *reg)
271{ 241{
272 struct kvm_vcpu *dst_vcpu = NULL;
273 int flags; 242 int flags;
274 int rc; 243 int rc;
275 244
276 if (cpu_id < KVM_MAX_VCPUS)
277 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
278 if (!dst_vcpu)
279 return SIGP_CC_NOT_OPERATIONAL;
280
281 spin_lock(&dst_vcpu->arch.local_int.lock); 245 spin_lock(&dst_vcpu->arch.local_int.lock);
282 flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); 246 flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
283 spin_unlock(&dst_vcpu->arch.local_int.lock); 247 spin_unlock(&dst_vcpu->arch.local_int.lock);
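The SIGP STOP and SET PREFIX paths above no longer allocate a kvm_s390_interrupt_info and queue it on a list; they store the payload in li->irq.* and flag the interrupt by setting one bit in li->pending_irqs. A stand-in sketch of that scheme follows; the bit numbers and helpers are illustrative, not the kernel's IRQ_PEND_* constants or set_bit()/test_bit().

#include <stdint.h>
#include <stdio.h>

enum { PEND_SET_PREFIX, PEND_SIGP_STOP };	/* illustrative bit numbers */

struct local_int_sketch {
	unsigned long pending_irqs;
	struct { uint32_t address; } prefix;	/* payload for SET PREFIX */
};

static void inject_set_prefix(struct local_int_sketch *li, uint32_t address)
{
	li->prefix.address = address;			/* record the payload */
	li->pending_irqs |= 1UL << PEND_SET_PREFIX;	/* then mark it pending */
}

static int deliver_pending(struct local_int_sketch *li)
{
	if (li->pending_irqs & (1UL << PEND_SET_PREFIX)) {
		li->pending_irqs &= ~(1UL << PEND_SET_PREFIX);
		printf("set prefix to %x\n", li->prefix.address);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct local_int_sketch li = { 0 };

	inject_set_prefix(&li, 0x2000);
	while (deliver_pending(&li))
		;
	return 0;
}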
@@ -297,19 +261,12 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
297 return rc; 261 return rc;
298} 262}
299 263
300static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, 264static int __sigp_sense_running(struct kvm_vcpu *vcpu,
301 u64 *reg) 265 struct kvm_vcpu *dst_vcpu, u64 *reg)
302{ 266{
303 struct kvm_s390_local_interrupt *li; 267 struct kvm_s390_local_interrupt *li;
304 struct kvm_vcpu *dst_vcpu = NULL;
305 int rc; 268 int rc;
306 269
307 if (cpu_addr >= KVM_MAX_VCPUS)
308 return SIGP_CC_NOT_OPERATIONAL;
309
310 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
311 if (!dst_vcpu)
312 return SIGP_CC_NOT_OPERATIONAL;
313 li = &dst_vcpu->arch.local_int; 270 li = &dst_vcpu->arch.local_int;
314 if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { 271 if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
315 /* running */ 272 /* running */
@@ -321,26 +278,19 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
321 rc = SIGP_CC_STATUS_STORED; 278 rc = SIGP_CC_STATUS_STORED;
322 } 279 }
323 280
324 VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr, 281 VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
325 rc); 282 dst_vcpu->vcpu_id, rc);
326 283
327 return rc; 284 return rc;
328} 285}
329 286
330/* Test whether the destination CPU is available and not busy */ 287static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
331static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) 288 struct kvm_vcpu *dst_vcpu, u8 order_code)
332{ 289{
333 struct kvm_s390_local_interrupt *li; 290 struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
334 int rc = SIGP_CC_ORDER_CODE_ACCEPTED; 291 /* handle (RE)START in user space */
335 struct kvm_vcpu *dst_vcpu = NULL; 292 int rc = -EOPNOTSUPP;
336
337 if (cpu_addr >= KVM_MAX_VCPUS)
338 return SIGP_CC_NOT_OPERATIONAL;
339 293
340 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
341 if (!dst_vcpu)
342 return SIGP_CC_NOT_OPERATIONAL;
343 li = &dst_vcpu->arch.local_int;
344 spin_lock(&li->lock); 294 spin_lock(&li->lock);
345 if (li->action_bits & ACTION_STOP_ON_STOP) 295 if (li->action_bits & ACTION_STOP_ON_STOP)
346 rc = SIGP_CC_BUSY; 296 rc = SIGP_CC_BUSY;
@@ -349,90 +299,131 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
349 return rc; 299 return rc;
350} 300}
351 301
352int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) 302static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
303 struct kvm_vcpu *dst_vcpu, u8 order_code)
353{ 304{
354 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 305 /* handle (INITIAL) CPU RESET in user space */
355 int r3 = vcpu->arch.sie_block->ipa & 0x000f; 306 return -EOPNOTSUPP;
356 u32 parameter; 307}
357 u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
358 u8 order_code;
359 int rc;
360 308
361 /* sigp in userspace can exit */ 309static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
362 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 310 struct kvm_vcpu *dst_vcpu)
363 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 311{
312 /* handle unknown orders in user space */
313 return -EOPNOTSUPP;
314}
364 315
365 order_code = kvm_s390_get_base_disp_rs(vcpu); 316static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
317 u16 cpu_addr, u32 parameter, u64 *status_reg)
318{
319 int rc;
320 struct kvm_vcpu *dst_vcpu;
366 321
367 if (r1 % 2) 322 if (cpu_addr >= KVM_MAX_VCPUS)
368 parameter = vcpu->run->s.regs.gprs[r1]; 323 return SIGP_CC_NOT_OPERATIONAL;
369 else 324
370 parameter = vcpu->run->s.regs.gprs[r1 + 1]; 325 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
326 if (!dst_vcpu)
327 return SIGP_CC_NOT_OPERATIONAL;
371 328
372 trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
373 switch (order_code) { 329 switch (order_code) {
374 case SIGP_SENSE: 330 case SIGP_SENSE:
375 vcpu->stat.instruction_sigp_sense++; 331 vcpu->stat.instruction_sigp_sense++;
376 rc = __sigp_sense(vcpu, cpu_addr, 332 rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
377 &vcpu->run->s.regs.gprs[r1]);
378 break; 333 break;
379 case SIGP_EXTERNAL_CALL: 334 case SIGP_EXTERNAL_CALL:
380 vcpu->stat.instruction_sigp_external_call++; 335 vcpu->stat.instruction_sigp_external_call++;
381 rc = __sigp_external_call(vcpu, cpu_addr); 336 rc = __sigp_external_call(vcpu, dst_vcpu);
382 break; 337 break;
383 case SIGP_EMERGENCY_SIGNAL: 338 case SIGP_EMERGENCY_SIGNAL:
384 vcpu->stat.instruction_sigp_emergency++; 339 vcpu->stat.instruction_sigp_emergency++;
385 rc = __sigp_emergency(vcpu, cpu_addr); 340 rc = __sigp_emergency(vcpu, dst_vcpu);
386 break; 341 break;
387 case SIGP_STOP: 342 case SIGP_STOP:
388 vcpu->stat.instruction_sigp_stop++; 343 vcpu->stat.instruction_sigp_stop++;
389 rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP); 344 rc = __sigp_stop(vcpu, dst_vcpu);
390 break; 345 break;
391 case SIGP_STOP_AND_STORE_STATUS: 346 case SIGP_STOP_AND_STORE_STATUS:
392 vcpu->stat.instruction_sigp_stop++; 347 vcpu->stat.instruction_sigp_stop_store_status++;
393 rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP | 348 rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
394 ACTION_STOP_ON_STOP);
395 break; 349 break;
396 case SIGP_STORE_STATUS_AT_ADDRESS: 350 case SIGP_STORE_STATUS_AT_ADDRESS:
397 rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter, 351 vcpu->stat.instruction_sigp_store_status++;
398 &vcpu->run->s.regs.gprs[r1]); 352 rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
399 break; 353 status_reg);
400 case SIGP_SET_ARCHITECTURE:
401 vcpu->stat.instruction_sigp_arch++;
402 rc = __sigp_set_arch(vcpu, parameter);
403 break; 354 break;
404 case SIGP_SET_PREFIX: 355 case SIGP_SET_PREFIX:
405 vcpu->stat.instruction_sigp_prefix++; 356 vcpu->stat.instruction_sigp_prefix++;
406 rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, 357 rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
407 &vcpu->run->s.regs.gprs[r1]);
408 break; 358 break;
409 case SIGP_COND_EMERGENCY_SIGNAL: 359 case SIGP_COND_EMERGENCY_SIGNAL:
410 rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter, 360 vcpu->stat.instruction_sigp_cond_emergency++;
411 &vcpu->run->s.regs.gprs[r1]); 361 rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
362 status_reg);
412 break; 363 break;
413 case SIGP_SENSE_RUNNING: 364 case SIGP_SENSE_RUNNING:
414 vcpu->stat.instruction_sigp_sense_running++; 365 vcpu->stat.instruction_sigp_sense_running++;
415 rc = __sigp_sense_running(vcpu, cpu_addr, 366 rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
416 &vcpu->run->s.regs.gprs[r1]);
417 break; 367 break;
418 case SIGP_START: 368 case SIGP_START:
419 rc = sigp_check_callable(vcpu, cpu_addr); 369 vcpu->stat.instruction_sigp_start++;
420 if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) 370 rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
421 rc = -EOPNOTSUPP; /* Handle START in user space */
422 break; 371 break;
423 case SIGP_RESTART: 372 case SIGP_RESTART:
424 vcpu->stat.instruction_sigp_restart++; 373 vcpu->stat.instruction_sigp_restart++;
425 rc = sigp_check_callable(vcpu, cpu_addr); 374 rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
426 if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) { 375 break;
427 VCPU_EVENT(vcpu, 4, 376 case SIGP_INITIAL_CPU_RESET:
428 "sigp restart %x to handle userspace", 377 vcpu->stat.instruction_sigp_init_cpu_reset++;
429 cpu_addr); 378 rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
430 /* user space must know about restart */ 379 break;
431 rc = -EOPNOTSUPP; 380 case SIGP_CPU_RESET:
432 } 381 vcpu->stat.instruction_sigp_cpu_reset++;
382 rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
383 break;
384 default:
385 vcpu->stat.instruction_sigp_unknown++;
386 rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
387 }
388
389 if (rc == -EOPNOTSUPP)
390 VCPU_EVENT(vcpu, 4,
391 "sigp order %u -> cpu %x: handled in user space",
392 order_code, dst_vcpu->vcpu_id);
393
394 return rc;
395}
396
397int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
398{
399 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
400 int r3 = vcpu->arch.sie_block->ipa & 0x000f;
401 u32 parameter;
402 u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
403 u8 order_code;
404 int rc;
405
406 /* sigp in userspace can exit */
407 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
408 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
409
410 order_code = kvm_s390_get_base_disp_rs(vcpu);
411
412 if (r1 % 2)
413 parameter = vcpu->run->s.regs.gprs[r1];
414 else
415 parameter = vcpu->run->s.regs.gprs[r1 + 1];
416
417 trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
418 switch (order_code) {
419 case SIGP_SET_ARCHITECTURE:
420 vcpu->stat.instruction_sigp_arch++;
421 rc = __sigp_set_arch(vcpu, parameter);
433 break; 422 break;
434 default: 423 default:
435 return -EOPNOTSUPP; 424 rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
425 parameter,
426 &vcpu->run->s.regs.gprs[r1]);
436 } 427 }
437 428
438 if (rc < 0) 429 if (rc < 0)