about summary refs log tree commit diff stats
path: root/arch/s390
diff options
context:
space:
mode:
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kvm_host.h |  6
-rw-r--r--  arch/s390/kernel/entry.S         |  2
-rw-r--r--  arch/s390/kvm/intercept.c        | 16
-rw-r--r--  arch/s390/kvm/interrupt.c        | 90
-rw-r--r--  arch/s390/kvm/kvm-s390.c         | 81
-rw-r--r--  arch/s390/kvm/kvm-s390.h         | 25
-rw-r--r--  arch/s390/kvm/priv.c             |  8
7 files changed, 140 insertions(+), 88 deletions(-)
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d01fc588b5c3..3024acbe1f9d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -80,6 +80,7 @@ struct sca_block {
80#define CPUSTAT_MCDS 0x00000100 80#define CPUSTAT_MCDS 0x00000100
81#define CPUSTAT_SM 0x00000080 81#define CPUSTAT_SM 0x00000080
82#define CPUSTAT_IBS 0x00000040 82#define CPUSTAT_IBS 0x00000040
83#define CPUSTAT_GED2 0x00000010
83#define CPUSTAT_G 0x00000008 84#define CPUSTAT_G 0x00000008
84#define CPUSTAT_GED 0x00000004 85#define CPUSTAT_GED 0x00000004
85#define CPUSTAT_J 0x00000002 86#define CPUSTAT_J 0x00000002
@@ -95,7 +96,8 @@ struct kvm_s390_sie_block {
95#define PROG_IN_SIE (1<<0) 96#define PROG_IN_SIE (1<<0)
96 __u32 prog0c; /* 0x000c */ 97 __u32 prog0c; /* 0x000c */
97 __u8 reserved10[16]; /* 0x0010 */ 98 __u8 reserved10[16]; /* 0x0010 */
98#define PROG_BLOCK_SIE 0x00000001 99#define PROG_BLOCK_SIE (1<<0)
100#define PROG_REQUEST (1<<1)
99 atomic_t prog20; /* 0x0020 */ 101 atomic_t prog20; /* 0x0020 */
100 __u8 reserved24[4]; /* 0x0024 */ 102 __u8 reserved24[4]; /* 0x0024 */
101 __u64 cputm; /* 0x0028 */ 103 __u64 cputm; /* 0x0028 */
@@ -634,7 +636,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
634static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} 636static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
635static inline void kvm_arch_free_memslot(struct kvm *kvm, 637static inline void kvm_arch_free_memslot(struct kvm *kvm,
636 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} 638 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
637static inline void kvm_arch_memslots_updated(struct kvm *kvm) {} 639static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
638static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} 640static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
639static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 641static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
640 struct kvm_memory_slot *slot) {} 642 struct kvm_memory_slot *slot) {}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 99b44acbfcc7..3238893c9d4f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1005,7 +1005,7 @@ ENTRY(sie64a)
1005.Lsie_gmap: 1005.Lsie_gmap:
1006 lg %r14,__SF_EMPTY(%r15) # get control block pointer 1006 lg %r14,__SF_EMPTY(%r15) # get control block pointer
1007 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 1007 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
1008 tm __SIE_PROG20+3(%r14),1 # last exit... 1008 tm __SIE_PROG20+3(%r14),3 # last exit...
1009 jnz .Lsie_done 1009 jnz .Lsie_done
1010 LPP __SF_EMPTY(%r15) # set guest id 1010 LPP __SF_EMPTY(%r15) # set guest id
1011 sie 0(%r14) 1011 sie 0(%r14)
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 9e3779e3e496..7365e8a46032 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -241,21 +241,6 @@ static int handle_prog(struct kvm_vcpu *vcpu)
241 return kvm_s390_inject_prog_irq(vcpu, &pgm_info); 241 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
242} 242}
243 243
244static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
245{
246 int rc, rc2;
247
248 vcpu->stat.exit_instr_and_program++;
249 rc = handle_instruction(vcpu);
250 rc2 = handle_prog(vcpu);
251
252 if (rc == -EOPNOTSUPP)
253 vcpu->arch.sie_block->icptcode = 0x04;
254 if (rc)
255 return rc;
256 return rc2;
257}
258
259/** 244/**
260 * handle_external_interrupt - used for external interruption interceptions 245 * handle_external_interrupt - used for external interruption interceptions
261 * 246 *
@@ -355,7 +340,6 @@ static const intercept_handler_t intercept_funcs[] = {
355 [0x00 >> 2] = handle_noop, 340 [0x00 >> 2] = handle_noop,
356 [0x04 >> 2] = handle_instruction, 341 [0x04 >> 2] = handle_instruction,
357 [0x08 >> 2] = handle_prog, 342 [0x08 >> 2] = handle_prog,
358 [0x0C >> 2] = handle_instruction_and_prog,
359 [0x10 >> 2] = handle_noop, 343 [0x10 >> 2] = handle_noop,
360 [0x14 >> 2] = handle_external_interrupt, 344 [0x14 >> 2] = handle_external_interrupt,
361 [0x18 >> 2] = handle_noop, 345 [0x18 >> 2] = handle_noop,
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 0d3deef6edff..c98d89708e99 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -134,6 +134,8 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
134 134
135 active_mask = pending_local_irqs(vcpu); 135 active_mask = pending_local_irqs(vcpu);
136 active_mask |= pending_floating_irqs(vcpu); 136 active_mask |= pending_floating_irqs(vcpu);
137 if (!active_mask)
138 return 0;
137 139
138 if (psw_extint_disabled(vcpu)) 140 if (psw_extint_disabled(vcpu))
139 active_mask &= ~IRQ_PEND_EXT_MASK; 141 active_mask &= ~IRQ_PEND_EXT_MASK;
@@ -941,12 +943,9 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
941 if (cpu_timer_irq_pending(vcpu)) 943 if (cpu_timer_irq_pending(vcpu))
942 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 944 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
943 945
944 do { 946 while ((irqs = deliverable_irqs(vcpu)) && !rc) {
945 irqs = deliverable_irqs(vcpu);
946 /* bits are in the order of interrupt priority */ 947 /* bits are in the order of interrupt priority */
947 irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT); 948 irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
948 if (irq_type == IRQ_PEND_COUNT)
949 break;
950 if (is_ioirq(irq_type)) { 949 if (is_ioirq(irq_type)) {
951 rc = __deliver_io(vcpu, irq_type); 950 rc = __deliver_io(vcpu, irq_type);
952 } else { 951 } else {
@@ -958,9 +957,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
958 } 957 }
959 rc = func(vcpu); 958 rc = func(vcpu);
960 } 959 }
961 if (rc) 960 }
962 break;
963 } while (!rc);
964 961
965 set_intercept_indicators(vcpu); 962 set_intercept_indicators(vcpu);
966 963
@@ -1061,7 +1058,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1061 if (sclp.has_sigpif) 1058 if (sclp.has_sigpif)
1062 return __inject_extcall_sigpif(vcpu, src_id); 1059 return __inject_extcall_sigpif(vcpu, src_id);
1063 1060
1064 if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) 1061 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1065 return -EBUSY; 1062 return -EBUSY;
1066 *extcall = irq->u.extcall; 1063 *extcall = irq->u.extcall;
1067 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1064 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1340,12 +1337,54 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1340 return 0; 1337 return 0;
1341} 1338}
1342 1339
1343static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 1340/*
1341 * Find a destination VCPU for a floating irq and kick it.
1342 */
1343static void __floating_irq_kick(struct kvm *kvm, u64 type)
1344{ 1344{
1345 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1345 struct kvm_s390_local_interrupt *li; 1346 struct kvm_s390_local_interrupt *li;
1347 struct kvm_vcpu *dst_vcpu;
1348 int sigcpu, online_vcpus, nr_tries = 0;
1349
1350 online_vcpus = atomic_read(&kvm->online_vcpus);
1351 if (!online_vcpus)
1352 return;
1353
1354 /* find idle VCPUs first, then round robin */
1355 sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1356 if (sigcpu == online_vcpus) {
1357 do {
1358 sigcpu = fi->next_rr_cpu;
1359 fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1360 /* avoid endless loops if all vcpus are stopped */
1361 if (nr_tries++ >= online_vcpus)
1362 return;
1363 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1364 }
1365 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1366
1367 /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1368 li = &dst_vcpu->arch.local_int;
1369 spin_lock(&li->lock);
1370 switch (type) {
1371 case KVM_S390_MCHK:
1372 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
1373 break;
1374 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1375 atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
1376 break;
1377 default:
1378 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1379 break;
1380 }
1381 spin_unlock(&li->lock);
1382 kvm_s390_vcpu_wakeup(dst_vcpu);
1383}
1384
1385static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1386{
1346 struct kvm_s390_float_interrupt *fi; 1387 struct kvm_s390_float_interrupt *fi;
1347 struct kvm_vcpu *dst_vcpu = NULL;
1348 int sigcpu;
1349 u64 type = READ_ONCE(inti->type); 1388 u64 type = READ_ONCE(inti->type);
1350 int rc; 1389 int rc;
1351 1390
@@ -1373,32 +1412,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1373 if (rc) 1412 if (rc)
1374 return rc; 1413 return rc;
1375 1414
1376 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); 1415 __floating_irq_kick(kvm, type);
1377 if (sigcpu == KVM_MAX_VCPUS) {
1378 do {
1379 sigcpu = fi->next_rr_cpu++;
1380 if (sigcpu == KVM_MAX_VCPUS)
1381 sigcpu = fi->next_rr_cpu = 0;
1382 } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
1383 }
1384 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1385 li = &dst_vcpu->arch.local_int;
1386 spin_lock(&li->lock);
1387 switch (type) {
1388 case KVM_S390_MCHK:
1389 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
1390 break;
1391 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1392 atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
1393 break;
1394 default:
1395 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1396 break;
1397 }
1398 spin_unlock(&li->lock);
1399 kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
1400 return 0; 1416 return 0;
1401
1402} 1417}
1403 1418
1404int kvm_s390_inject_vm(struct kvm *kvm, 1419int kvm_s390_inject_vm(struct kvm *kvm,
@@ -1606,6 +1621,9 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
1606 int i; 1621 int i;
1607 1622
1608 spin_lock(&fi->lock); 1623 spin_lock(&fi->lock);
1624 fi->pending_irqs = 0;
1625 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1626 memset(&fi->mchk, 0, sizeof(fi->mchk));
1609 for (i = 0; i < FIRQ_LIST_COUNT; i++) 1627 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1610 clear_irq_list(&fi->lists[i]); 1628 clear_irq_list(&fi->lists[i]);
1611 for (i = 0; i < FIRQ_MAX_COUNT; i++) 1629 for (i = 0; i < FIRQ_MAX_COUNT; i++)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c4e81b26c1b0..2078f92d15ac 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -36,6 +36,10 @@
36#include "kvm-s390.h" 36#include "kvm-s390.h"
37#include "gaccess.h" 37#include "gaccess.h"
38 38
39#define KMSG_COMPONENT "kvm-s390"
40#undef pr_fmt
41#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
42
39#define CREATE_TRACE_POINTS 43#define CREATE_TRACE_POINTS
40#include "trace.h" 44#include "trace.h"
41#include "trace-s390.h" 45#include "trace-s390.h"
@@ -110,7 +114,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
110/* upper facilities limit for kvm */ 114/* upper facilities limit for kvm */
111unsigned long kvm_s390_fac_list_mask[] = { 115unsigned long kvm_s390_fac_list_mask[] = {
112 0xffe6fffbfcfdfc40UL, 116 0xffe6fffbfcfdfc40UL,
113 0x005c800000000000UL, 117 0x005e800000000000UL,
114}; 118};
115 119
116unsigned long kvm_s390_fac_list_mask_size(void) 120unsigned long kvm_s390_fac_list_mask_size(void)
@@ -236,6 +240,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
236{ 240{
237 int r; 241 int r;
238 unsigned long n; 242 unsigned long n;
243 struct kvm_memslots *slots;
239 struct kvm_memory_slot *memslot; 244 struct kvm_memory_slot *memslot;
240 int is_dirty = 0; 245 int is_dirty = 0;
241 246
@@ -245,7 +250,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
245 if (log->slot >= KVM_USER_MEM_SLOTS) 250 if (log->slot >= KVM_USER_MEM_SLOTS)
246 goto out; 251 goto out;
247 252
248 memslot = id_to_memslot(kvm->memslots, log->slot); 253 slots = kvm_memslots(kvm);
254 memslot = id_to_memslot(slots, log->slot);
249 r = -ENOENT; 255 r = -ENOENT;
250 if (!memslot->dirty_bitmap) 256 if (!memslot->dirty_bitmap)
251 goto out; 257 goto out;
@@ -454,10 +460,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
454 460
455 mutex_lock(&kvm->lock); 461 mutex_lock(&kvm->lock);
456 kvm->arch.epoch = gtod - host_tod; 462 kvm->arch.epoch = gtod - host_tod;
457 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) { 463 kvm_s390_vcpu_block_all(kvm);
464 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
458 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; 465 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
459 exit_sie(cur_vcpu); 466 kvm_s390_vcpu_unblock_all(kvm);
460 }
461 mutex_unlock(&kvm->lock); 467 mutex_unlock(&kvm->lock);
462 return 0; 468 return 0;
463} 469}
@@ -1311,8 +1317,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1311 1317
1312 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | 1318 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1313 CPUSTAT_SM | 1319 CPUSTAT_SM |
1314 CPUSTAT_STOPPED | 1320 CPUSTAT_STOPPED);
1315 CPUSTAT_GED); 1321
1322 if (test_kvm_facility(vcpu->kvm, 78))
1323 atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
1324 else if (test_kvm_facility(vcpu->kvm, 8))
1325 atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1326
1316 kvm_s390_vcpu_setup_model(vcpu); 1327 kvm_s390_vcpu_setup_model(vcpu);
1317 1328
1318 vcpu->arch.sie_block->ecb = 6; 1329 vcpu->arch.sie_block->ecb = 6;
@@ -1409,16 +1420,28 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1409 return kvm_s390_vcpu_has_irq(vcpu, 0); 1420 return kvm_s390_vcpu_has_irq(vcpu, 0);
1410} 1421}
1411 1422
1412void s390_vcpu_block(struct kvm_vcpu *vcpu) 1423void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
1413{ 1424{
1414 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 1425 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1426 exit_sie(vcpu);
1415} 1427}
1416 1428
1417void s390_vcpu_unblock(struct kvm_vcpu *vcpu) 1429void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1418{ 1430{
1419 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 1431 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1420} 1432}
1421 1433
1434static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1435{
1436 atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1437 exit_sie(vcpu);
1438}
1439
1440static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1441{
1442 atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1443}
1444
1422/* 1445/*
1423 * Kick a guest cpu out of SIE and wait until SIE is not running. 1446 * Kick a guest cpu out of SIE and wait until SIE is not running.
1424 * If the CPU is not running (e.g. waiting as idle) the function will 1447 * If the CPU is not running (e.g. waiting as idle) the function will
@@ -1430,11 +1453,11 @@ void exit_sie(struct kvm_vcpu *vcpu)
1430 cpu_relax(); 1453 cpu_relax();
1431} 1454}
1432 1455
1433/* Kick a guest cpu out of SIE and prevent SIE-reentry */ 1456/* Kick a guest cpu out of SIE to process a request synchronously */
1434void exit_sie_sync(struct kvm_vcpu *vcpu) 1457void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
1435{ 1458{
1436 s390_vcpu_block(vcpu); 1459 kvm_make_request(req, vcpu);
1437 exit_sie(vcpu); 1460 kvm_s390_vcpu_request(vcpu);
1438} 1461}
1439 1462
1440static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address) 1463static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
@@ -1447,8 +1470,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1447 /* match against both prefix pages */ 1470 /* match against both prefix pages */
1448 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) { 1471 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
1449 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); 1472 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1450 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 1473 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
1451 exit_sie_sync(vcpu);
1452 } 1474 }
1453 } 1475 }
1454} 1476}
@@ -1720,8 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
1720 1742
1721static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1722{ 1744{
1745 if (!vcpu->requests)
1746 return 0;
1723retry: 1747retry:
1724 s390_vcpu_unblock(vcpu); 1748 kvm_s390_vcpu_request_handled(vcpu);
1725 /* 1749 /*
1726 * We use MMU_RELOAD just to re-arm the ipte notifier for the 1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1727 * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -1993,12 +2017,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
1993 * As PF_VCPU will be used in fault handler, between 2017 * As PF_VCPU will be used in fault handler, between
1994 * guest_enter and guest_exit should be no uaccess. 2018 * guest_enter and guest_exit should be no uaccess.
1995 */ 2019 */
1996 preempt_disable(); 2020 local_irq_disable();
1997 kvm_guest_enter(); 2021 __kvm_guest_enter();
1998 preempt_enable(); 2022 local_irq_enable();
1999 exit_reason = sie64a(vcpu->arch.sie_block, 2023 exit_reason = sie64a(vcpu->arch.sie_block,
2000 vcpu->run->s.regs.gprs); 2024 vcpu->run->s.regs.gprs);
2001 kvm_guest_exit(); 2025 local_irq_disable();
2026 __kvm_guest_exit();
2027 local_irq_enable();
2002 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 2028 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2003 2029
2004 rc = vcpu_post_run(vcpu, exit_reason); 2030 rc = vcpu_post_run(vcpu, exit_reason);
@@ -2068,7 +2094,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2068 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 2094 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2069 kvm_s390_vcpu_start(vcpu); 2095 kvm_s390_vcpu_start(vcpu);
2070 } else if (is_vcpu_stopped(vcpu)) { 2096 } else if (is_vcpu_stopped(vcpu)) {
2071 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", 2097 pr_err_ratelimited("can't run stopped vcpu %d\n",
2072 vcpu->vcpu_id); 2098 vcpu->vcpu_id);
2073 return -EINVAL; 2099 return -EINVAL;
2074 } 2100 }
@@ -2206,8 +2232,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2206static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 2232static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2207{ 2233{
2208 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 2234 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2209 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 2235 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
2210 exit_sie_sync(vcpu);
2211} 2236}
2212 2237
2213static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 2238static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
@@ -2223,8 +2248,7 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2223static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 2248static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2224{ 2249{
2225 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 2250 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2226 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 2251 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
2227 exit_sie_sync(vcpu);
2228} 2252}
2229 2253
2230void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 2254void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
@@ -2563,7 +2587,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2563/* Section: memory related */ 2587/* Section: memory related */
2564int kvm_arch_prepare_memory_region(struct kvm *kvm, 2588int kvm_arch_prepare_memory_region(struct kvm *kvm,
2565 struct kvm_memory_slot *memslot, 2589 struct kvm_memory_slot *memslot,
2566 struct kvm_userspace_memory_region *mem, 2590 const struct kvm_userspace_memory_region *mem,
2567 enum kvm_mr_change change) 2591 enum kvm_mr_change change)
2568{ 2592{
2569 /* A few sanity checks. We can have memory slots which have to be 2593 /* A few sanity checks. We can have memory slots which have to be
@@ -2581,8 +2605,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
2581} 2605}
2582 2606
2583void kvm_arch_commit_memory_region(struct kvm *kvm, 2607void kvm_arch_commit_memory_region(struct kvm *kvm,
2584 struct kvm_userspace_memory_region *mem, 2608 const struct kvm_userspace_memory_region *mem,
2585 const struct kvm_memory_slot *old, 2609 const struct kvm_memory_slot *old,
2610 const struct kvm_memory_slot *new,
2586 enum kvm_mr_change change) 2611 enum kvm_mr_change change)
2587{ 2612{
2588 int rc; 2613 int rc;
@@ -2601,7 +2626,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
2601 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, 2626 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2602 mem->guest_phys_addr, mem->memory_size); 2627 mem->guest_phys_addr, mem->memory_size);
2603 if (rc) 2628 if (rc)
2604 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); 2629 pr_warn("failed to commit memory region\n");
2605 return; 2630 return;
2606} 2631}
2607 2632
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ca108b90ae56..c5704786e473 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -211,10 +211,10 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
211int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr); 211int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
212void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); 212void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
213void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); 213void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
214void s390_vcpu_block(struct kvm_vcpu *vcpu); 214void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
215void s390_vcpu_unblock(struct kvm_vcpu *vcpu); 215void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
216void exit_sie(struct kvm_vcpu *vcpu); 216void exit_sie(struct kvm_vcpu *vcpu);
217void exit_sie_sync(struct kvm_vcpu *vcpu); 217void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
218int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); 218int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
219void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); 219void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
220/* is cmma enabled */ 220/* is cmma enabled */
@@ -228,6 +228,25 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
228int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, 228int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
229 struct kvm_s390_pgm_info *pgm_info); 229 struct kvm_s390_pgm_info *pgm_info);
230 230
231static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
232{
233 int i;
234 struct kvm_vcpu *vcpu;
235
236 WARN_ON(!mutex_is_locked(&kvm->lock));
237 kvm_for_each_vcpu(i, vcpu, kvm)
238 kvm_s390_vcpu_block(vcpu);
239}
240
241static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
242{
243 int i;
244 struct kvm_vcpu *vcpu;
245
246 kvm_for_each_vcpu(i, vcpu, kvm)
247 kvm_s390_vcpu_unblock(vcpu);
248}
249
231/** 250/**
232 * kvm_s390_inject_prog_cond - conditionally inject a program check 251 * kvm_s390_inject_prog_cond - conditionally inject a program check
233 * @vcpu: virtual cpu 252 * @vcpu: virtual cpu
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d22d8ee1ff9d..ad4242245771 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -698,10 +698,14 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
698 case 0x00001000: 698 case 0x00001000:
699 end = (start + (1UL << 20)) & ~((1UL << 20) - 1); 699 end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
700 break; 700 break;
701 /* We dont support EDAT2
702 case 0x00002000: 701 case 0x00002000:
702 /* only support 2G frame size if EDAT2 is available and we are
703 not in 24-bit addressing mode */
704 if (!test_kvm_facility(vcpu->kvm, 78) ||
705 psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
706 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
703 end = (start + (1UL << 31)) & ~((1UL << 31) - 1); 707 end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
704 break;*/ 708 break;
705 default: 709 default:
706 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 710 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
707 } 711 }