aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaolo Bonzini <pbonzini@redhat.com>2014-04-30 06:29:41 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2014-04-30 06:29:41 -0400
commit57b5981cd38cbca3554c5e663b2361d9adea70c2 (patch)
tree7e5403858549becc65710a15889b3c69d498ba8e
parente4c9a5a17567f8ea975bdcfdd1bf9d63965de6c9 (diff)
parent8ad357551797b1edc184fb9f6a4f80a6fa626459 (diff)
Merge tag 'kvm-s390-20140429' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next
1. Guest handling fixes: The handling of MVPG, PFMF and Test Block is fixed to better follow the architecture. None of these fixes is critical for any current Linux guests, but let's play safe. 2. Optimization for single CPU guests: We can enable the IBS facility if only one VCPU is running (!STOPPED state). We also enable this optimization for guests > 1 VCPU as soon as all but one VCPU is in stopped state. This will help guests that have tools like cpuplugd (from s390-utils) that do dynamic offline/online of CPUs. 3. NOTES: There is one non-s390 change in include/linux/kvm_host.h that introduces 2 defines for VCPU requests: #define KVM_REQ_ENABLE_IBS 23 and #define KVM_REQ_DISABLE_IBS 24
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/kvm/diag.c2
-rw-r--r--arch/s390/kvm/gaccess.c28
-rw-r--r--arch/s390/kvm/gaccess.h1
-rw-r--r--arch/s390/kvm/intercept.c58
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c139
-rw-r--r--arch/s390/kvm/kvm-s390.h2
-rw-r--r--arch/s390/kvm/priv.c21
-rw-r--r--arch/s390/kvm/trace-s390.h43
-rw-r--r--include/linux/kvm_host.h2
11 files changed, 287 insertions, 13 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 0d45f6fe734f..f0a1dc5e5d1f 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -72,6 +72,7 @@ struct sca_block {
72#define CPUSTAT_ZARCH 0x00000800 72#define CPUSTAT_ZARCH 0x00000800
73#define CPUSTAT_MCDS 0x00000100 73#define CPUSTAT_MCDS 0x00000100
74#define CPUSTAT_SM 0x00000080 74#define CPUSTAT_SM 0x00000080
75#define CPUSTAT_IBS 0x00000040
75#define CPUSTAT_G 0x00000008 76#define CPUSTAT_G 0x00000008
76#define CPUSTAT_GED 0x00000004 77#define CPUSTAT_GED 0x00000004
77#define CPUSTAT_J 0x00000002 78#define CPUSTAT_J 0x00000002
@@ -411,6 +412,7 @@ struct kvm_arch{
411 int use_cmma; 412 int use_cmma;
412 struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; 413 struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
413 wait_queue_head_t ipte_wq; 414 wait_queue_head_t ipte_wq;
415 spinlock_t start_stop_lock;
414}; 416};
415 417
416#define KVM_HVA_ERR_BAD (-1UL) 418#define KVM_HVA_ERR_BAD (-1UL)
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 5521ace8b60d..004d385d9519 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -176,7 +176,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
176 return -EOPNOTSUPP; 176 return -EOPNOTSUPP;
177 } 177 }
178 178
179 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 179 kvm_s390_vcpu_stop(vcpu);
180 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; 180 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
181 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; 181 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
182 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; 182 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 691fdb776c90..db608c3f9303 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -643,3 +643,31 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
643 } 643 }
644 return rc; 644 return rc;
645} 645}
646
647/**
648 * kvm_s390_check_low_addr_protection - check for low-address protection
649 * @ga: Guest address
650 *
651 * Checks whether an address is subject to low-address protection and set
652 * up vcpu->arch.pgm accordingly if necessary.
653 *
654 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
655 */
656int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
657{
658 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
659 psw_t *psw = &vcpu->arch.sie_block->gpsw;
660 struct trans_exc_code_bits *tec_bits;
661
662 if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
663 return 0;
664
665 memset(pgm, 0, sizeof(*pgm));
666 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
667 tec_bits->fsi = FSI_STORE;
668 tec_bits->as = psw_bits(*psw).as;
669 tec_bits->addr = ga >> PAGE_SHIFT;
670 pgm->code = PGM_PROTECTION;
671
672 return pgm->code;
673}
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 1079c8fc6d0d..68db43e4254f 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -325,5 +325,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
325} 325}
326 326
327int ipte_lock_held(struct kvm_vcpu *vcpu); 327int ipte_lock_held(struct kvm_vcpu *vcpu);
328int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
328 329
329#endif /* __KVM_S390_GACCESS_H */ 330#endif /* __KVM_S390_GACCESS_H */
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 30e1c5eb726a..99e4b76e3487 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * in-kernel handling for sie intercepts 2 * in-kernel handling for sie intercepts
3 * 3 *
4 * Copyright IBM Corp. 2008, 2009 4 * Copyright IBM Corp. 2008, 2014
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only) 7 * it under the terms of the GNU General Public License (version 2 only)
@@ -65,8 +65,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
65 trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); 65 trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
66 66
67 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { 67 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
68 atomic_set_mask(CPUSTAT_STOPPED, 68 kvm_s390_vcpu_stop(vcpu);
69 &vcpu->arch.sie_block->cpuflags);
70 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; 69 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
71 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); 70 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
72 rc = -EOPNOTSUPP; 71 rc = -EOPNOTSUPP;
@@ -234,6 +233,58 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
234 return rc2; 233 return rc2;
235} 234}
236 235
236/**
237 * Handle MOVE PAGE partial execution interception.
238 *
239 * This interception can only happen for guests with DAT disabled and
240 * addresses that are currently not mapped in the host. Thus we try to
241 * set up the mappings for the corresponding user pages here (or throw
242 * addressing exceptions in case of illegal guest addresses).
243 */
244static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
245{
246 unsigned long hostaddr, srcaddr, dstaddr;
247 psw_t *psw = &vcpu->arch.sie_block->gpsw;
248 struct mm_struct *mm = current->mm;
249 int reg1, reg2, rc;
250
251 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
252 srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
253 dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
254
255 /* Make sure that the source is paged-in */
256 hostaddr = gmap_fault(srcaddr, vcpu->arch.gmap);
257 if (IS_ERR_VALUE(hostaddr))
258 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
259 down_read(&mm->mmap_sem);
260 rc = get_user_pages(current, mm, hostaddr, 1, 0, 0, NULL, NULL);
261 up_read(&mm->mmap_sem);
262 if (rc < 0)
263 return rc;
264
265 /* Make sure that the destination is paged-in */
266 hostaddr = gmap_fault(dstaddr, vcpu->arch.gmap);
267 if (IS_ERR_VALUE(hostaddr))
268 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
269 down_read(&mm->mmap_sem);
270 rc = get_user_pages(current, mm, hostaddr, 1, 1, 0, NULL, NULL);
271 up_read(&mm->mmap_sem);
272 if (rc < 0)
273 return rc;
274
275 psw->addr = __rewind_psw(*psw, 4);
276
277 return 0;
278}
279
280static int handle_partial_execution(struct kvm_vcpu *vcpu)
281{
282 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
283 return handle_mvpg_pei(vcpu);
284
285 return -EOPNOTSUPP;
286}
287
237static const intercept_handler_t intercept_funcs[] = { 288static const intercept_handler_t intercept_funcs[] = {
238 [0x00 >> 2] = handle_noop, 289 [0x00 >> 2] = handle_noop,
239 [0x04 >> 2] = handle_instruction, 290 [0x04 >> 2] = handle_instruction,
@@ -245,6 +296,7 @@ static const intercept_handler_t intercept_funcs[] = {
245 [0x1C >> 2] = kvm_s390_handle_wait, 296 [0x1C >> 2] = kvm_s390_handle_wait,
246 [0x20 >> 2] = handle_validity, 297 [0x20 >> 2] = handle_validity,
247 [0x28 >> 2] = handle_stop, 298 [0x28 >> 2] = handle_stop,
299 [0x38 >> 2] = handle_partial_execution,
248}; 300};
249 301
250int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) 302int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 077e4738ebdc..d9526bb29194 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -413,7 +413,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
413 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), 413 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
414 &vcpu->arch.sie_block->gpsw, 414 &vcpu->arch.sie_block->gpsw,
415 sizeof(psw_t)); 415 sizeof(psw_t));
416 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 416 kvm_s390_vcpu_start(vcpu);
417 break; 417 break;
418 case KVM_S390_PROGRAM_INT: 418 case KVM_S390_PROGRAM_INT:
419 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", 419 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b32c42cbc706..0a01744cbdd9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -458,6 +458,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
458 kvm->arch.css_support = 0; 458 kvm->arch.css_support = 0;
459 kvm->arch.use_irqchip = 0; 459 kvm->arch.use_irqchip = 0;
460 460
461 spin_lock_init(&kvm->arch.start_stop_lock);
462
461 return 0; 463 return 0;
462out_nogmap: 464out_nogmap:
463 debug_unregister(kvm->arch.dbf); 465 debug_unregister(kvm->arch.dbf);
@@ -592,7 +594,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
592 vcpu->arch.sie_block->pp = 0; 594 vcpu->arch.sie_block->pp = 0;
593 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 595 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
594 kvm_clear_async_pf_completion_queue(vcpu); 596 kvm_clear_async_pf_completion_queue(vcpu);
595 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 597 kvm_s390_vcpu_stop(vcpu);
596 kvm_s390_clear_local_irqs(vcpu); 598 kvm_s390_clear_local_irqs(vcpu);
597} 599}
598 600
@@ -996,8 +998,15 @@ bool kvm_s390_cmma_enabled(struct kvm *kvm)
996 return true; 998 return true;
997} 999}
998 1000
1001static bool ibs_enabled(struct kvm_vcpu *vcpu)
1002{
1003 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1004}
1005
999static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 1006static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1000{ 1007{
1008retry:
1009 s390_vcpu_unblock(vcpu);
1001 /* 1010 /*
1002 * We use MMU_RELOAD just to re-arm the ipte notifier for the 1011 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1003 * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 1012 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -1005,15 +1014,34 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1005 * already finished. We might race against a second unmapper that 1014 * already finished. We might race against a second unmapper that
1006 * wants to set the blocking bit. Lets just retry the request loop. 1015 * wants to set the blocking bit. Lets just retry the request loop.
1007 */ 1016 */
1008 while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { 1017 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1009 int rc; 1018 int rc;
1010 rc = gmap_ipte_notify(vcpu->arch.gmap, 1019 rc = gmap_ipte_notify(vcpu->arch.gmap,
1011 vcpu->arch.sie_block->prefix, 1020 vcpu->arch.sie_block->prefix,
1012 PAGE_SIZE * 2); 1021 PAGE_SIZE * 2);
1013 if (rc) 1022 if (rc)
1014 return rc; 1023 return rc;
1015 s390_vcpu_unblock(vcpu); 1024 goto retry;
1025 }
1026
1027 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1028 if (!ibs_enabled(vcpu)) {
1029 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1030 atomic_set_mask(CPUSTAT_IBS,
1031 &vcpu->arch.sie_block->cpuflags);
1032 }
1033 goto retry;
1016 } 1034 }
1035
1036 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1037 if (ibs_enabled(vcpu)) {
1038 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1039 atomic_clear_mask(CPUSTAT_IBS,
1040 &vcpu->arch.sie_block->cpuflags);
1041 }
1042 goto retry;
1043 }
1044
1017 return 0; 1045 return 0;
1018} 1046}
1019 1047
@@ -1235,7 +1263,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1235 if (vcpu->sigset_active) 1263 if (vcpu->sigset_active)
1236 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1264 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1237 1265
1238 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 1266 kvm_s390_vcpu_start(vcpu);
1239 1267
1240 switch (kvm_run->exit_reason) { 1268 switch (kvm_run->exit_reason) {
1241 case KVM_EXIT_S390_SIEIC: 1269 case KVM_EXIT_S390_SIEIC:
@@ -1362,6 +1390,109 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1362 return kvm_s390_store_status_unloaded(vcpu, addr); 1390 return kvm_s390_store_status_unloaded(vcpu, addr);
1363} 1391}
1364 1392
1393static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
1394{
1395 return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
1396}
1397
1398static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1399{
1400 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
1401 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
1402 exit_sie_sync(vcpu);
1403}
1404
1405static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1406{
1407 unsigned int i;
1408 struct kvm_vcpu *vcpu;
1409
1410 kvm_for_each_vcpu(i, vcpu, kvm) {
1411 __disable_ibs_on_vcpu(vcpu);
1412 }
1413}
1414
1415static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1416{
1417 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
1418 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
1419 exit_sie_sync(vcpu);
1420}
1421
1422void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1423{
1424 int i, online_vcpus, started_vcpus = 0;
1425
1426 if (!is_vcpu_stopped(vcpu))
1427 return;
1428
1429 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
1430 /* Only one cpu at a time may enter/leave the STOPPED state. */
1431 spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
1432 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1433
1434 for (i = 0; i < online_vcpus; i++) {
1435 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1436 started_vcpus++;
1437 }
1438
1439 if (started_vcpus == 0) {
1440 /* we're the only active VCPU -> speed it up */
1441 __enable_ibs_on_vcpu(vcpu);
1442 } else if (started_vcpus == 1) {
1443 /*
1444 * As we are starting a second VCPU, we have to disable
1445 * the IBS facility on all VCPUs to remove potentially
1446 * oustanding ENABLE requests.
1447 */
1448 __disable_ibs_on_all_vcpus(vcpu->kvm);
1449 }
1450
1451 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1452 /*
1453 * Another VCPU might have used IBS while we were offline.
1454 * Let's play safe and flush the VCPU at startup.
1455 */
1456 vcpu->arch.sie_block->ihcpu = 0xffff;
1457 spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
1458 return;
1459}
1460
1461void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1462{
1463 int i, online_vcpus, started_vcpus = 0;
1464 struct kvm_vcpu *started_vcpu = NULL;
1465
1466 if (is_vcpu_stopped(vcpu))
1467 return;
1468
1469 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
1470 /* Only one cpu at a time may enter/leave the STOPPED state. */
1471 spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
1472 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1473
1474 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1475 __disable_ibs_on_vcpu(vcpu);
1476
1477 for (i = 0; i < online_vcpus; i++) {
1478 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1479 started_vcpus++;
1480 started_vcpu = vcpu->kvm->vcpus[i];
1481 }
1482 }
1483
1484 if (started_vcpus == 1) {
1485 /*
1486 * As we only have one VCPU left, we want to enable the
1487 * IBS facility for that VCPU to speed it up.
1488 */
1489 __enable_ibs_on_vcpu(started_vcpu);
1490 }
1491
1492 spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
1493 return;
1494}
1495
1365static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 1496static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1366 struct kvm_enable_cap *cap) 1497 struct kvm_enable_cap *cap)
1367{ 1498{
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 9b5680d1f6cc..c28423a3acc0 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -157,6 +157,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
157/* implemented in kvm-s390.c */ 157/* implemented in kvm-s390.c */
158int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); 158int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
159int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); 159int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
160void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
161void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
160void s390_vcpu_block(struct kvm_vcpu *vcpu); 162void s390_vcpu_block(struct kvm_vcpu *vcpu);
161void s390_vcpu_unblock(struct kvm_vcpu *vcpu); 163void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
162void exit_sie(struct kvm_vcpu *vcpu); 164void exit_sie(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 27f9051a78f8..07d0c1025cb9 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -206,6 +206,9 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
206 206
207 kvm_s390_get_regs_rre(vcpu, NULL, &reg2); 207 kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
208 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 208 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
209 addr = kvm_s390_logical_to_effective(vcpu, addr);
210 if (kvm_s390_check_low_addr_protection(vcpu, addr))
211 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
209 addr = kvm_s390_real_to_abs(vcpu, addr); 212 addr = kvm_s390_real_to_abs(vcpu, addr);
210 213
211 if (kvm_is_error_gpa(vcpu->kvm, addr)) 214 if (kvm_is_error_gpa(vcpu->kvm, addr))
@@ -650,6 +653,11 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
650 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 653 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
651 654
652 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 655 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
656 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
657 if (kvm_s390_check_low_addr_protection(vcpu, start))
658 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
659 }
660
653 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 661 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
654 case 0x00000000: 662 case 0x00000000:
655 end = (start + (1UL << 12)) & ~((1UL << 12) - 1); 663 end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
@@ -665,10 +673,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
665 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 673 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
666 } 674 }
667 while (start < end) { 675 while (start < end) {
668 unsigned long useraddr; 676 unsigned long useraddr, abs_addr;
669 677
670 useraddr = gmap_translate(start, vcpu->arch.gmap); 678 /* Translate guest address to host address */
671 if (IS_ERR((void *)useraddr)) 679 if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
680 abs_addr = kvm_s390_real_to_abs(vcpu, start);
681 else
682 abs_addr = start;
683 useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
684 if (kvm_is_error_hva(useraddr))
672 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 685 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
673 686
674 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 687 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 13f30f58a2df..647e9d6a4818 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -68,6 +68,27 @@ TRACE_EVENT(kvm_s390_destroy_vcpu,
68 ); 68 );
69 69
70/* 70/*
71 * Trace point for start and stop of vpcus.
72 */
73TRACE_EVENT(kvm_s390_vcpu_start_stop,
74 TP_PROTO(unsigned int id, int state),
75 TP_ARGS(id, state),
76
77 TP_STRUCT__entry(
78 __field(unsigned int, id)
79 __field(int, state)
80 ),
81
82 TP_fast_assign(
83 __entry->id = id;
84 __entry->state = state;
85 ),
86
87 TP_printk("%s cpu %d", __entry->state ? "starting" : "stopping",
88 __entry->id)
89 );
90
91/*
71 * Trace points for injection of interrupts, either per machine or 92 * Trace points for injection of interrupts, either per machine or
72 * per vcpu. 93 * per vcpu.
73 */ 94 */
@@ -223,6 +244,28 @@ TRACE_EVENT(kvm_s390_enable_css,
223 __entry->kvm) 244 __entry->kvm)
224 ); 245 );
225 246
247/*
248 * Trace point for enabling and disabling interlocking-and-broadcasting
249 * suppression.
250 */
251TRACE_EVENT(kvm_s390_enable_disable_ibs,
252 TP_PROTO(unsigned int id, int state),
253 TP_ARGS(id, state),
254
255 TP_STRUCT__entry(
256 __field(unsigned int, id)
257 __field(int, state)
258 ),
259
260 TP_fast_assign(
261 __entry->id = id;
262 __entry->state = state;
263 ),
264
265 TP_printk("%s ibs on cpu %d",
266 __entry->state ? "enabling" : "disabling", __entry->id)
267 );
268
226 269
227#endif /* _TRACE_KVMS390_H */ 270#endif /* _TRACE_KVMS390_H */
228 271
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 820fc2e1d9df..1e125b055327 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -134,6 +134,8 @@ static inline bool is_error_page(struct page *page)
134#define KVM_REQ_EPR_EXIT 20 134#define KVM_REQ_EPR_EXIT 20
135#define KVM_REQ_SCAN_IOAPIC 21 135#define KVM_REQ_SCAN_IOAPIC 21
136#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22 136#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
137#define KVM_REQ_ENABLE_IBS 23
138#define KVM_REQ_DISABLE_IBS 24
137 139
138#define KVM_USERSPACE_IRQ_SOURCE_ID 0 140#define KVM_USERSPACE_IRQ_SOURCE_ID 0
139#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 141#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1