author		Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 16:21:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 16:21:40 -0400
commit		fe489bf4505ae26d3c6d6a1f1d3064c2a9c5cd85 (patch)
tree		46596fd7edf7c4da1dafdb2c62011841e71cf32d /arch/s390
parent		3e34131a65127e73fbae68c82748f32c8af7e4a4 (diff)
parent		a3ff5fbc94a829680d4aa005cd17add1c1a1fb5b (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "On the x86 side, there are some optimizations and documentation
  updates. The big ARM/KVM change for 3.11, support for AArch64, will
  come through Catalin Marinas's tree. s390 and PPC have misc cleanups
  and bugfixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (87 commits)
  KVM: PPC: Ignore PIR writes
  KVM: PPC: Book3S PR: Invalidate SLB entries properly
  KVM: PPC: Book3S PR: Allow guest to use 1TB segments
  KVM: PPC: Book3S PR: Don't keep scanning HPTEG after we find a match
  KVM: PPC: Book3S PR: Fix invalidation of SLB entry 0 on guest entry
  KVM: PPC: Book3S PR: Fix proto-VSID calculations
  KVM: PPC: Guard doorbell exception with CONFIG_PPC_DOORBELL
  KVM: Fix RTC interrupt coalescing tracking
  kvm: Add a tracepoint write_tsc_offset
  KVM: MMU: Inform users of mmio generation wraparound
  KVM: MMU: document fast invalidate all mmio sptes
  KVM: MMU: document fast invalidate all pages
  KVM: MMU: document fast page fault
  KVM: MMU: document mmio page fault
  KVM: MMU: document write_flooding_count
  KVM: MMU: document clear_spte_count
  KVM: MMU: drop kvm_mmu_zap_mmio_sptes
  KVM: MMU: init kvm generation close to mmio wrap-around value
  KVM: MMU: add tracepoint for check_mmio_spte
  KVM: MMU: fast invalidate all mmio sptes
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/kvm_host.h	|  18
-rw-r--r--	arch/s390/include/asm/perf_event.h	|  10
-rw-r--r--	arch/s390/include/asm/pgtable.h		|  83
-rw-r--r--	arch/s390/kernel/asm-offsets.c		|   3
-rw-r--r--	arch/s390/kernel/entry64.S		|  81
-rw-r--r--	arch/s390/kernel/perf_event.c		|  52
-rw-r--r--	arch/s390/kernel/s390_ksyms.c		|   1
-rw-r--r--	arch/s390/kvm/Makefile			|   3
-rw-r--r--	arch/s390/kvm/diag.c			|   3
-rw-r--r--	arch/s390/kvm/intercept.c		| 124
-rw-r--r--	arch/s390/kvm/interrupt.c		|  18
-rw-r--r--	arch/s390/kvm/kvm-s390.c		| 105
-rw-r--r--	arch/s390/kvm/kvm-s390.h		|  14
-rw-r--r--	arch/s390/kvm/priv.c			| 274
-rw-r--r--	arch/s390/kvm/sigp.c			|  19
-rw-r--r--	arch/s390/mm/pgtable.c			|   2
16 files changed, 527 insertions, 283 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 16bd5d169cdb..3238d4004e84 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -62,13 +62,20 @@ struct sca_block {
 #define CPUSTAT_MCDS       0x00000100
 #define CPUSTAT_SM         0x00000080
 #define CPUSTAT_G          0x00000008
+#define CPUSTAT_GED        0x00000004
 #define CPUSTAT_J          0x00000002
 #define CPUSTAT_P          0x00000001
 
 struct kvm_s390_sie_block {
 	atomic_t cpuflags;		/* 0x0000 */
 	__u32	prefix;			/* 0x0004 */
-	__u8	reserved8[32];		/* 0x0008 */
+	__u8	reserved08[4];		/* 0x0008 */
+#define PROG_IN_SIE (1<<0)
+	__u32	prog0c;			/* 0x000c */
+	__u8	reserved10[16];		/* 0x0010 */
+#define PROG_BLOCK_SIE 0x00000001
+	atomic_t prog20;		/* 0x0020 */
+	__u8	reserved24[4];		/* 0x0024 */
 	__u64	cputm;			/* 0x0028 */
 	__u64	ckc;			/* 0x0030 */
 	__u64	epoch;			/* 0x0038 */
@@ -90,7 +97,8 @@ struct kvm_s390_sie_block {
 	__u32	scaoh;			/* 0x005c */
 	__u8	reserved60;		/* 0x0060 */
 	__u8	ecb;			/* 0x0061 */
-	__u8	reserved62[2];		/* 0x0062 */
+	__u8	ecb2;			/* 0x0062 */
+	__u8	reserved63[1];		/* 0x0063 */
 	__u32	scaol;			/* 0x0064 */
 	__u8	reserved68[4];		/* 0x0068 */
 	__u32	todpr;			/* 0x006c */
@@ -130,6 +138,7 @@ struct kvm_vcpu_stat {
 	u32 deliver_program_int;
 	u32 deliver_io_int;
 	u32 exit_wait_state;
+	u32 instruction_pfmf;
 	u32 instruction_stidp;
 	u32 instruction_spx;
 	u32 instruction_stpx;
@@ -166,7 +175,7 @@ struct kvm_s390_ext_info {
 };
 
 #define PGM_OPERATION            0x01
-#define PGM_PRIVILEGED_OPERATION 0x02
+#define PGM_PRIVILEGED_OP	 0x02
 #define PGM_EXECUTE              0x03
 #define PGM_PROTECTION           0x04
 #define PGM_ADDRESSING           0x05
@@ -219,7 +228,7 @@ struct kvm_s390_local_interrupt {
 	atomic_t active;
 	struct kvm_s390_float_interrupt *float_int;
 	int timer_due; /* event indicator for waitqueue below */
-	wait_queue_head_t wq;
+	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
 	unsigned int action_bits;
 };
@@ -266,4 +275,5 @@ struct kvm_arch{
 };
 
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
+extern char sie_exit;
 #endif
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 5f0173a31693..1141fb3e7b21 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -14,3 +14,13 @@
 /* Per-CPU flags for PMU states */
 #define PMU_F_RESERVED			0x1000
 #define PMU_F_ENABLED			0x2000
+
+#ifdef CONFIG_64BIT
+
+/* Perf callbacks */
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+
+#endif /* CONFIG_64BIT */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9aefa3c64eb2..0ea4e591fa78 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -296,18 +296,16 @@ extern unsigned long MODULES_END;
 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
 
 /* Page status table bits for virtualization */
-#define RCP_ACC_BITS	0xf0000000UL
-#define RCP_FP_BIT	0x08000000UL
-#define RCP_PCL_BIT	0x00800000UL
-#define RCP_HR_BIT	0x00400000UL
-#define RCP_HC_BIT	0x00200000UL
-#define RCP_GR_BIT	0x00040000UL
-#define RCP_GC_BIT	0x00020000UL
-#define RCP_IN_BIT	0x00002000UL	/* IPTE notify bit */
-
-/* User dirty / referenced bit for KVM's migration feature */
-#define KVM_UR_BIT	0x00008000UL
-#define KVM_UC_BIT	0x00004000UL
+#define PGSTE_ACC_BITS	0xf0000000UL
+#define PGSTE_FP_BIT	0x08000000UL
+#define PGSTE_PCL_BIT	0x00800000UL
+#define PGSTE_HR_BIT	0x00400000UL
+#define PGSTE_HC_BIT	0x00200000UL
+#define PGSTE_GR_BIT	0x00040000UL
+#define PGSTE_GC_BIT	0x00020000UL
+#define PGSTE_UR_BIT	0x00008000UL
+#define PGSTE_UC_BIT	0x00004000UL	/* user dirty (migration) */
+#define PGSTE_IN_BIT	0x00002000UL	/* IPTE notify bit */
 
 #else /* CONFIG_64BIT */
 
@@ -364,18 +362,16 @@ extern unsigned long MODULES_END;
 				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
 
 /* Page status table bits for virtualization */
-#define RCP_ACC_BITS	0xf000000000000000UL
-#define RCP_FP_BIT	0x0800000000000000UL
-#define RCP_PCL_BIT	0x0080000000000000UL
-#define RCP_HR_BIT	0x0040000000000000UL
-#define RCP_HC_BIT	0x0020000000000000UL
-#define RCP_GR_BIT	0x0004000000000000UL
-#define RCP_GC_BIT	0x0002000000000000UL
-#define RCP_IN_BIT	0x0000200000000000UL	/* IPTE notify bit */
-
-/* User dirty / referenced bit for KVM's migration feature */
-#define KVM_UR_BIT	0x0000800000000000UL
-#define KVM_UC_BIT	0x0000400000000000UL
+#define PGSTE_ACC_BITS	0xf000000000000000UL
+#define PGSTE_FP_BIT	0x0800000000000000UL
+#define PGSTE_PCL_BIT	0x0080000000000000UL
+#define PGSTE_HR_BIT	0x0040000000000000UL
+#define PGSTE_HC_BIT	0x0020000000000000UL
+#define PGSTE_GR_BIT	0x0004000000000000UL
+#define PGSTE_GC_BIT	0x0002000000000000UL
+#define PGSTE_UR_BIT	0x0000800000000000UL
+#define PGSTE_UC_BIT	0x0000400000000000UL	/* user dirty (migration) */
+#define PGSTE_IN_BIT	0x0000200000000000UL	/* IPTE notify bit */
 
 #endif /* CONFIG_64BIT */
 
@@ -615,8 +611,8 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
 	asm(
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
-		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
-		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
+		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
+		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
 		"	csg	%0,%1,%2\n"
 		"	jl	0b\n"
 		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
@@ -629,7 +625,7 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
 	asm(
-		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
+		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
 		"	stg	%1,%0\n"
 		: "=Q" (ptep[PTRS_PER_PTE])
 		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
@@ -662,14 +658,14 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 	else if (bits)
 		page_reset_referenced(address);
 	/* Transfer page changed & referenced bit to guest bits in pgste */
-	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
+	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
 	/* Get host changed & referenced bits from pgste */
-	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
+	bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52;
 	/* Transfer page changed & referenced bit to kvm user bits */
-	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
+	pgste_val(pgste) |= bits << 45;		/* PGSTE_UR_BIT & PGSTE_UC_BIT */
 	/* Clear relevant host bits in pgste. */
-	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
-	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
+	pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT);
+	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 	/* Copy page access key and fetch protection bit to pgste */
 	pgste_val(pgste) |=
 		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
@@ -690,15 +686,15 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 	/* Get referenced bit from storage key */
 	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
 	if (young)
-		pgste_val(pgste) |= RCP_GR_BIT;
+		pgste_val(pgste) |= PGSTE_GR_BIT;
 	/* Get host referenced bit from pgste */
-	if (pgste_val(pgste) & RCP_HR_BIT) {
-		pgste_val(pgste) &= ~RCP_HR_BIT;
+	if (pgste_val(pgste) & PGSTE_HR_BIT) {
+		pgste_val(pgste) &= ~PGSTE_HR_BIT;
 		young = 1;
 	}
 	/* Transfer referenced bit to kvm user bits and pte */
 	if (young) {
-		pgste_val(pgste) |= KVM_UR_BIT;
+		pgste_val(pgste) |= PGSTE_UR_BIT;
 		pte_val(*ptep) |= _PAGE_SWR;
 	}
 #endif
@@ -720,7 +716,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 	 * The guest C/R information is still in the PGSTE, set real
 	 * key C/R to 0.
 	 */
-	nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
+	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 	page_set_storage_key(address, nkey, 0);
 #endif
 }
@@ -750,6 +746,7 @@ struct gmap {
 	struct mm_struct *mm;
 	unsigned long *table;
 	unsigned long asce;
+	void *private;
 	struct list_head crst_list;
 };
 
@@ -808,8 +805,8 @@ static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
 					pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
-	if (pgste_val(pgste) & RCP_IN_BIT) {
-		pgste_val(pgste) &= ~RCP_IN_BIT;
+	if (pgste_val(pgste) & PGSTE_IN_BIT) {
+		pgste_val(pgste) &= ~PGSTE_IN_BIT;
 		gmap_do_ipte_notify(mm, addr, ptep);
 	}
 #endif
@@ -977,8 +974,8 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_update_all(ptep, pgste);
-		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
-		pgste_val(pgste) &= ~KVM_UC_BIT;
+		dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
+		pgste_val(pgste) &= ~PGSTE_UC_BIT;
 		pgste_set_unlock(ptep, pgste);
 		return dirty;
 	}
@@ -997,8 +994,8 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_update_young(ptep, pgste);
-		young = !!(pgste_val(pgste) & KVM_UR_BIT);
-		pgste_val(pgste) &= ~KVM_UR_BIT;
+		young = !!(pgste_val(pgste) & PGSTE_UR_BIT);
+		pgste_val(pgste) &= ~PGSTE_UR_BIT;
 		pgste_set_unlock(ptep, pgste);
 	}
 	return young;
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index d6de844bc30a..2416138ebd3e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C
 
 #include <linux/kbuild.h>
+#include <linux/kvm_host.h>
 #include <linux/sched.h>
 #include <asm/cputime.h>
 #include <asm/vdso.h>
@@ -162,6 +163,8 @@ int main(void)
 	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
 	DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
 	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
+	DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
+	DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
 #endif /* CONFIG_32BIT */
 	return 0;
 }
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index bc5864c5148b..1c039d0c24c7 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -47,7 +47,6 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		 _TIF_SYSCALL_TRACEPOINT)
-_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
 
 #define BASED(name) name-system_call(%r13)
 
@@ -81,23 +80,27 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
 #endif
 	.endm
 
-	.macro HANDLE_SIE_INTERCEPT scratch,pgmcheck
+	.macro HANDLE_SIE_INTERCEPT scratch,reason
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
 	tmhh	%r8,0x0001		# interrupting from user ?
-	jnz	.+42
+	jnz	.+62
 	lgr	\scratch,%r9
-	slg	\scratch,BASED(.Lsie_loop)
-	clg	\scratch,BASED(.Lsie_length)
-	.if	\pgmcheck
+	slg	\scratch,BASED(.Lsie_critical)
+	clg	\scratch,BASED(.Lsie_critical_length)
+	.if	\reason==1
 	# Some program interrupts are suppressing (e.g. protection).
 	# We must also check the instruction after SIE in that case.
 	# do_protection_exception will rewind to rewind_pad
-	jh	.+22
+	jh	.+42
 	.else
-	jhe	.+22
+	jhe	.+42
 	.endif
-	lg	%r9,BASED(.Lsie_loop)
-	LPP	BASED(.Lhost_id)	# set host id
+	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
+	LPP	__SF_EMPTY+16(%r15)		# set host id
+	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+	larl	%r9,sie_exit			# skip forward to sie_exit
+	mvi	__SF_EMPTY+31(%r15),\reason	# set exit reason
 #endif
 	.endm
 
@@ -450,7 +453,7 @@ ENTRY(io_int_handler)
 	lg	%r12,__LC_THREAD_INFO
 	larl	%r13,system_call
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,0
+	HANDLE_SIE_INTERCEPT %r14,2
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 	tmhh	%r8,0x0001		# interrupting from user?
 	jz	io_skip
@@ -603,7 +606,7 @@ ENTRY(ext_int_handler)
 	lg	%r12,__LC_THREAD_INFO
 	larl	%r13,system_call
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,0
+	HANDLE_SIE_INTERCEPT %r14,3
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 	tmhh	%r8,0x0001		# interrupting from user ?
 	jz	ext_skip
@@ -651,7 +654,7 @@ ENTRY(mcck_int_handler)
 	lg	%r12,__LC_THREAD_INFO
 	larl	%r13,system_call
 	lmg	%r8,%r9,__LC_MCK_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,0
+	HANDLE_SIE_INTERCEPT %r14,4
 	tm	__LC_MCCK_CODE,0x80	# system damage?
 	jo	mcck_panic		# yes -> rest of mcck code invalid
 	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
@@ -945,56 +948,50 @@ ENTRY(sie64a)
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
-	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
+	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
 	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions in the sie_loop should not cause program interrupts. So
-# lets use a nop (47 00 00 00) as a landing pad.
-# See also HANDLE_SIE_INTERCEPT
-rewind_pad:
-	nop	0
-sie_loop:
-	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
-	tm	__TI_flags+7(%r14),_TIF_EXIT_SIE
-	jnz	sie_exit
 	lg	%r14,__LC_GMAP			# get gmap pointer
 	ltgr	%r14,%r14
 	jz	sie_gmap
 	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
 sie_gmap:
 	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
+	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
+	tm	__SIE_PROG20+3(%r14),1		# last exit...
+	jnz	sie_done
 	LPP	__SF_EMPTY(%r15)		# set guest id
 	sie	0(%r14)
 sie_done:
 	LPP	__SF_EMPTY+16(%r15)		# set host id
-	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
-sie_exit:
+	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions beween sie64a and sie_done should not cause program
+# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# See also HANDLE_SIE_INTERCEPT
+rewind_pad:
+	nop	0
+	.globl sie_exit
+sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
-	lghi	%r2,0
+	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
 	br	%r14
 sie_fault:
-	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
-	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
-	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
-	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
-	lghi	%r2,-EFAULT
-	br	%r14
+	lghi	%r14,-EFAULT
+	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
+	j	sie_exit
 
 	.align	8
-.Lsie_loop:
-	.quad	sie_loop
-.Lsie_length:
-	.quad	sie_done - sie_loop
-.Lhost_id:
-	.quad	0
+.Lsie_critical:
+	.quad	sie_gmap
+.Lsie_critical_length:
+	.quad	sie_done - sie_gmap
 
 	EX_TABLE(rewind_pad,sie_fault)
-	EX_TABLE(sie_loop,sie_fault)
+	EX_TABLE(sie_exit,sie_fault)
 #endif
 
 	.section .rodata, "a"
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index f58f37f66824..a6fc037671b1 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/perf_event.h>
+#include <linux/kvm_host.h>
 #include <linux/percpu.h>
 #include <linux/export.h>
 #include <asm/irq.h>
@@ -39,6 +40,57 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL(perf_num_counters);
 
+static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
+{
+	struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];
+
+	if (!stack)
+		return NULL;
+
+	return (struct kvm_s390_sie_block *) stack->empty1[0];
+}
+
+static bool is_in_guest(struct pt_regs *regs)
+{
+	unsigned long ip = instruction_pointer(regs);
+
+	if (user_mode(regs))
+		return false;
+
+	return ip == (unsigned long) &sie_exit;
+}
+
+static unsigned long guest_is_user_mode(struct pt_regs *regs)
+{
+	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
+}
+
+static unsigned long instruction_pointer_guest(struct pt_regs *regs)
+{
+	return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	return is_in_guest(regs) ? instruction_pointer_guest(regs)
+				 : instruction_pointer(regs);
+}
+
+static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
+{
+	return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
+					: PERF_RECORD_MISC_GUEST_KERNEL;
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	if (is_in_guest(regs))
+		return perf_misc_guest_flags(regs);
+
+	return user_mode(regs) ? PERF_RECORD_MISC_USER
+			       : PERF_RECORD_MISC_KERNEL;
+}
+
 void perf_event_print_debug(void)
 {
 	struct cpumf_ctr_info cf_info;
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 9bdbcef1da9e..3bac589844a7 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -7,6 +7,7 @@ EXPORT_SYMBOL(_mcount);
 #endif
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
 EXPORT_SYMBOL(sie64a);
+EXPORT_SYMBOL(sie_exit);
 #endif
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 8fe9d65a4585..40b4c6470f88 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -6,7 +6,8 @@
 # it under the terms of the GNU General Public License (version 2 only)
 # as published by the Free Software Foundation.
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o eventfd.o)
+KVM := ../../../virt/kvm
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 1c01a9912989..3074475c8ae0 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -132,6 +132,9 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 {
 	int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	trace_kvm_s390_handle_diag(vcpu, code);
 	switch (code) {
 	case 0x10:
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index b7d1b2edeeb3..5ee56e5acc23 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -22,87 +22,6 @@
 #include "trace.h"
 #include "trace-s390.h"
 
-static int handle_lctlg(struct kvm_vcpu *vcpu)
-{
-	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
-	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-	u64 useraddr;
-	int reg, rc;
-
-	vcpu->stat.instruction_lctlg++;
-
-	useraddr = kvm_s390_get_base_disp_rsy(vcpu);
-
-	if (useraddr & 7)
-		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-
-	reg = reg1;
-
-	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
-		   useraddr);
-	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
-
-	do {
-		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
-			       (u64 __user *) useraddr);
-		if (rc)
-			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		useraddr += 8;
-		if (reg == reg3)
-			break;
-		reg = (reg + 1) % 16;
-	} while (1);
-	return 0;
-}
-
-static int handle_lctl(struct kvm_vcpu *vcpu)
-{
-	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
-	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-	u64 useraddr;
-	u32 val = 0;
-	int reg, rc;
-
-	vcpu->stat.instruction_lctl++;
-
-	useraddr = kvm_s390_get_base_disp_rs(vcpu);
-
-	if (useraddr & 3)
-		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-
-	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
-		   useraddr);
-	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
-
-	reg = reg1;
-	do {
-		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
-		if (rc)
-			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
-		vcpu->arch.sie_block->gcr[reg] |= val;
-		useraddr += 4;
-		if (reg == reg3)
-			break;
-		reg = (reg + 1) % 16;
-	} while (1);
-	return 0;
-}
-
-static const intercept_handler_t eb_handlers[256] = {
-	[0x2f] = handle_lctlg,
-	[0x8a] = kvm_s390_handle_priv_eb,
-};
-
-static int handle_eb(struct kvm_vcpu *vcpu)
-{
-	intercept_handler_t handler;
-
-	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
-	if (handler)
-		return handler(vcpu);
-	return -EOPNOTSUPP;
-}
 
 static const intercept_handler_t instruction_handlers[256] = {
 	[0x01] = kvm_s390_handle_01,
@@ -110,10 +29,10 @@ static const intercept_handler_t instruction_handlers[256] = {
 	[0x83] = kvm_s390_handle_diag,
 	[0xae] = kvm_s390_handle_sigp,
 	[0xb2] = kvm_s390_handle_b2,
-	[0xb7] = handle_lctl,
+	[0xb7] = kvm_s390_handle_lctl,
 	[0xb9] = kvm_s390_handle_b9,
 	[0xe5] = kvm_s390_handle_e5,
-	[0xeb] = handle_eb,
+	[0xeb] = kvm_s390_handle_eb,
 };
 
 static int handle_noop(struct kvm_vcpu *vcpu)
@@ -174,47 +93,12 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 
 static int handle_validity(struct kvm_vcpu *vcpu)
 {
-	unsigned long vmaddr;
 	int viwhy = vcpu->arch.sie_block->ipb >> 16;
-	int rc;
 
 	vcpu->stat.exit_validity++;
 	trace_kvm_s390_intercept_validity(vcpu, viwhy);
-	if (viwhy == 0x37) {
-		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
-				    vcpu->arch.gmap);
-		if (IS_ERR_VALUE(vmaddr)) {
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-		rc = fault_in_pages_writeable((char __user *) vmaddr,
-					      PAGE_SIZE);
-		if (rc) {
-			/* user will receive sigsegv, exit to user */
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
-				    vcpu->arch.gmap);
-		if (IS_ERR_VALUE(vmaddr)) {
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-		rc = fault_in_pages_writeable((char __user *) vmaddr,
-					      PAGE_SIZE);
-		if (rc) {
-			/* user will receive sigsegv, exit to user */
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-	} else
-		rc = -EOPNOTSUPP;
-
-out:
-	if (rc)
-		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
-			   viwhy);
-	return rc;
+	WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
+	return -EOPNOTSUPP;
 }
 
 static int handle_instruction(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5c948177529e..7f35cb33e510 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -438,7 +438,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 	spin_lock(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
+	add_wait_queue(&vcpu->wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
 		list_empty(&vcpu->arch.local_int.float_int->list) &&
 		(!vcpu->arch.local_int.timer_due) &&
@@ -452,7 +452,7 @@ no_timer:
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
+	remove_wait_queue(&vcpu->wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
 	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
@@ -465,8 +465,8 @@ void kvm_s390_tasklet(unsigned long parm)
 
 	spin_lock(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.timer_due = 1;
-	if (waitqueue_active(&vcpu->arch.local_int.wq))
-		wake_up_interruptible(&vcpu->arch.local_int.wq);
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
 	spin_unlock(&vcpu->arch.local_int.lock);
 }
 
@@ -613,7 +613,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 	spin_lock_bh(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	BUG_ON(waitqueue_active(&li->wq));
+	BUG_ON(waitqueue_active(li->wq));
 	spin_unlock_bh(&li->lock);
 	return 0;
 }
@@ -746,8 +746,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	li = fi->local_int[sigcpu];
 	spin_lock_bh(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&li->wq);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
 	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
@@ -832,8 +832,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	if (inti->type == KVM_S390_SIGP_STOP)
 		li->action_bits |= ACTION_STOP_ON_STOP;
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&vcpu->arch.local_int.wq);
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
 	spin_unlock_bh(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
 	return 0;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c1c7c683fa26..ba694d2ba51e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -59,6 +59,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
+	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
@@ -84,6 +85,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 };
 
 static unsigned long long *facilities;
+static struct gmap_notifier gmap_notifier;
 
 /* Section: not file related */
 int kvm_arch_hardware_enable(void *garbage)
@@ -96,13 +98,18 @@ void kvm_arch_hardware_disable(void *garbage)
 {
 }
 
+static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
+
 int kvm_arch_hardware_setup(void)
 {
+	gmap_notifier.notifier_call = kvm_gmap_notifier;
+	gmap_register_ipte_notifier(&gmap_notifier);
 	return 0;
 }
 
 void kvm_arch_hardware_unsetup(void)
 {
+	gmap_unregister_ipte_notifier(&gmap_notifier);
 }
 
 void kvm_arch_check_processor_compat(void *rtn)
@@ -239,6 +246,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		kvm->arch.gmap = gmap_alloc(current->mm);
 		if (!kvm->arch.gmap)
 			goto out_nogmap;
+		kvm->arch.gmap->private = kvm;
 	}
 
 	kvm->arch.css_support = 0;
@@ -270,7 +278,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 	free_page((unsigned long)(vcpu->arch.sie_block));
 	kvm_vcpu_uninit(vcpu);
-	kfree(vcpu);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
@@ -309,6 +317,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		vcpu->arch.gmap = gmap_alloc(current->mm);
 		if (!vcpu->arch.gmap)
 			return -ENOMEM;
+		vcpu->arch.gmap->private = vcpu->kvm;
 		return 0;
 	}
 
@@ -373,8 +382,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
 						    CPUSTAT_SM |
-						    CPUSTAT_STOPPED);
+						    CPUSTAT_STOPPED |
+						    CPUSTAT_GED);
 	vcpu->arch.sie_block->ecb   = 6;
+	vcpu->arch.sie_block->ecb2  = 8;
 	vcpu->arch.sie_block->eca   = 0xC1002001U;
 	vcpu->arch.sie_block->fac   = (int) (long) facilities;
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
@@ -397,7 +408,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 	rc = -ENOMEM;
 
-	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 	if (!vcpu)
 		goto out;
 
@@ -427,7 +438,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
 	spin_lock(&kvm->arch.float_int.lock);
 	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
-	init_waitqueue_head(&vcpu->arch.local_int.wq);
+	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 	spin_unlock(&kvm->arch.float_int.lock);
 
@@ -442,7 +453,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 out_free_sie_block:
 	free_page((unsigned long)(vcpu->arch.sie_block));
 out_free_cpu:
-	kfree(vcpu);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
 	return ERR_PTR(rc);
 }
@@ -454,6 +465,50 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+void s390_vcpu_block(struct kvm_vcpu *vcpu)
+{
+	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+}
+
+void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+{
+	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+}
+
+/*
+ * Kick a guest cpu out of SIE and wait until SIE is not running.
+ * If the CPU is not running (e.g. waiting as idle) the function will
+ * return immediately. */
+void exit_sie(struct kvm_vcpu *vcpu)
+{
+	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
+		cpu_relax();
+}
+
+/* Kick a guest cpu out of SIE and prevent SIE-reentry */
+void exit_sie_sync(struct kvm_vcpu *vcpu)
+{
+	s390_vcpu_block(vcpu);
+	exit_sie(vcpu);
+}
+
+static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
+{
+	int i;
+	struct kvm *kvm = gmap->private;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		/* match against both prefix pages */
+		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
+			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
+			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+			exit_sie_sync(vcpu);
+		}
+	}
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	/* kvm common code refers to this, but never calls it */
@@ -606,6 +661,27 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
+static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
+	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
+	 * This ensures that the ipte instruction for this request has
+	 * already finished. We might race against a second unmapper that
+	 * wants to set the blocking bit. Lets just retry the request loop.
+	 */
+	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
+		int rc;
+		rc = gmap_ipte_notify(vcpu->arch.gmap,
+				      vcpu->arch.sie_block->prefix,
+				      PAGE_SIZE * 2);
+		if (rc)
+			return rc;
+		s390_vcpu_unblock(vcpu);
+	}
+	return 0;
+}
+
 static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int rc;
@@ -621,6 +697,10 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	if (!kvm_is_ucontrol(vcpu->kvm))
 		kvm_s390_deliver_pending_interrupts(vcpu);
 
+	rc = kvm_s390_handle_requests(vcpu);
+	if (rc)
+		return rc;
+
 	vcpu->arch.sie_block->icptcode = 0;
 	preempt_disable();
 	kvm_guest_enter();
@@ -630,7 +710,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_sie_enter(vcpu,
 				 atomic_read(&vcpu->arch.sie_block->cpuflags));
 	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
-	if (rc) {
+	if (rc > 0)
+		rc = 0;
+	if (rc < 0) {
 		if (kvm_is_ucontrol(vcpu->kvm)) {
 			rc = SIE_INTERCEPT_UCONTROL;
 		} else {
@@ -1046,7 +1128,7 @@ static int __init kvm_s390_init(void)
 		return -ENOMEM;
 	}
 	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
-	facilities[0] &= 0xff00fff3f47c0000ULL;
+	facilities[0] &= 0xff82fff3f47c0000ULL;
 	facilities[1] &= 0x001c000000000000ULL;
 	return 0;
 }
@@ -1059,3 +1141,12 @@ static void __exit kvm_s390_exit(void)
 
 module_init(kvm_s390_init);
 module_exit(kvm_s390_exit);
+
+/*
+ * Enable autoloading of the kvm module.
+ * Note that we add the module alias here instead of virt/kvm/kvm_main.c
+ * since x86 takes a different approach.
+ */
+#include <linux/miscdevice.h>
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index efc14f687265..028ca9fd2158 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -63,6 +63,7 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 {
 	vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u;
 	vcpu->arch.sie_block->ihcpu  = 0xffff;
+	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
 static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
@@ -85,6 +86,12 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
 }
 
+static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
+{
+	*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
+	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+}
+
 static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
@@ -125,7 +132,8 @@ int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
-int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
 
 /* implemented in sigp.c */
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
@@ -133,6 +141,10 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 /* implemented in kvm-s390.c */
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
 				unsigned long addr);
+void s390_vcpu_block(struct kvm_vcpu *vcpu);
+void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+void exit_sie(struct kvm_vcpu *vcpu);
+void exit_sie_sync(struct kvm_vcpu *vcpu);
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 6bbd7b5a0bbe..0da3e6eb6be6 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1,7 +1,7 @@
 /*
  * handling privileged instructions
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2013
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -20,6 +20,9 @@
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
 #include <asm/ptrace.h>
 #include <asm/compat.h>
 #include "gaccess.h"
@@ -34,6 +37,9 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.instruction_spx++;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
@@ -65,6 +71,9 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.instruction_stpx++;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
@@ -89,6 +98,9 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.instruction_stap++;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	useraddr = kvm_s390_get_base_disp_s(vcpu);
 
 	if (useraddr & 1)
@@ -105,7 +117,12 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 static int handle_skey(struct kvm_vcpu *vcpu)
 {
 	vcpu->stat.instruction_storage_key++;
-	vcpu->arch.sie_block->gpsw.addr -= 4;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	vcpu->arch.sie_block->gpsw.addr =
+		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
 	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
 	return 0;
 }
@@ -129,9 +146,10 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 		 * Store the two-word I/O interruption code into the
 		 * provided area.
 		 */
-		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
-		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
-		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
+		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
+		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
+		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
+			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	} else {
 		/*
 		 * Store the three-word I/O interruption code into
@@ -182,6 +200,9 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	if (vcpu->kvm->arch.css_support) {
 		/*
 		 * Most I/O instructions will be handled by userspace.
@@ -210,8 +231,12 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 	int rc;
 
 	vcpu->stat.instruction_stfl++;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	/* only pass the facility bits, which we can handle */
-	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;
+	facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;
 
 	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
 			   &facility_list, sizeof(facility_list));
@@ -255,8 +280,8 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	u64 addr;
 
 	if (gpsw->mask & PSW_MASK_PSTATE)
-		return kvm_s390_inject_program_int(vcpu,
-			PGM_PRIVILEGED_OPERATION);
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	addr = kvm_s390_get_base_disp_s(vcpu);
 	if (addr & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -278,6 +303,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	psw_t new_psw;
 	u64 addr;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	addr = kvm_s390_get_base_disp_s(vcpu);
 	if (addr & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -296,6 +324,9 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.instruction_stidp++;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	if (operand2 & 7)
@@ -351,16 +382,30 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
 
-	operand2 = kvm_s390_get_base_disp_s(vcpu);
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	if (fc > 3) {
+		vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;	  /* cc 3 */
+		return 0;
+	}
 
-	if (operand2 & 0xfff && fc > 0)
+	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
+	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	switch (fc) {
-	case 0:
+	if (fc == 0) {
 		vcpu->run->s.regs.gprs[0] = 3 << 28;
-		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);  /* cc 0 */
 		return 0;
+	}
+
+	operand2 = kvm_s390_get_base_disp_s(vcpu);
+
+	if (operand2 & 0xfff)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	switch (fc) {
 	case 1: /* same handling for 1 and 2 */
 	case 2:
 		mem = get_zeroed_page(GFP_KERNEL);
@@ -377,8 +422,6 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
377 goto out_no_data; 422 goto out_no_data;
378 handle_stsi_3_2_2(vcpu, (void *) mem); 423 handle_stsi_3_2_2(vcpu, (void *) mem);
379 break; 424 break;
380 default:
381 goto out_no_data;
382 } 425 }
383 426
384 if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { 427 if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
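
The restructured handle_stsi above reports an unsupported function code by setting condition code 3 directly in the guest PSW (gpsw.mask |= 3ul << 44) and clears the field for the fc == 0 case. The cc occupies PSW bits 18-19, which is shift 44 in the 64-bit mask word; a standalone sketch of that arithmetic (PSW_MASK_CC and the helper names are mine, not from this patch):

#include <stdio.h>
#include <stdint.h>

/* The s390 condition code sits in PSW bits 18-19 (big-endian bit
 * numbering), i.e. at shift 44 of the 64-bit PSW mask. */
#define PSW_MASK_CC	(3ULL << 44)

static uint64_t psw_set_cc(uint64_t mask, unsigned int cc)
{
	return (mask & ~PSW_MASK_CC) | ((uint64_t)(cc & 3) << 44);
}

static unsigned int psw_get_cc(uint64_t mask)
{
	return (mask >> 44) & 3;
}

int main(void)
{
	uint64_t mask = 0;

	mask = psw_set_cc(mask, 3);		/* the fc > 3 path */
	printf("cc = %u\n", psw_get_cc(mask));	/* 3 */
	mask = psw_set_cc(mask, 0);		/* the fc == 0 path */
	printf("cc = %u\n", psw_get_cc(mask));	/* 0 */
	return 0;
}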
@@ -432,20 +475,14 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
432 intercept_handler_t handler; 475 intercept_handler_t handler;
433 476
434 /* 477 /*
435 * a lot of B2 instructions are privileged. We first check for 478 * A lot of B2 instructions are privileged. Here we check for
436 * the privileged ones that we can handle in the kernel. If the 479 * the privileged ones that we can handle in the kernel.
437 * kernel can handle this instruction, we check for the problem 480 * Anything else goes to userspace.
438 * state bit and (a) handle the instruction or (b) send a code 2 481 */
439 * program check.
440 * Anything else goes to userspace.*/
441 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 482 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
442 if (handler) { 483 if (handler)
443 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 484 return handler(vcpu);
444 return kvm_s390_inject_program_int(vcpu, 485
445 PGM_PRIVILEGED_OPERATION);
446 else
447 return handler(vcpu);
448 }
449 return -EOPNOTSUPP; 486 return -EOPNOTSUPP;
450} 487}
451 488
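
This hunk carries the theme of the whole series: the b2 dispatcher no longer rejects problem-state guests up front, and every privileged handler instead opens with its own PSW_MASK_PSTATE guard (with the exception constant now spelled PGM_PRIVILEGED_OP). A toy user-space model of the resulting dispatch; all names are illustrative, and the PSW_MASK_PSTATE value assumes the problem-state bit is PSW bit 15, as I believe arch/s390 defines it:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define PSW_MASK_PSTATE	0x0001000000000000ULL	/* assumed: PSW bit 15 */

struct toy_vcpu { uint64_t psw_mask; };

typedef int (*handler_t)(struct toy_vcpu *);

static int inject_priv_op(struct toy_vcpu *vcpu)
{
	printf("PGM_PRIVILEGED_OP injected\n");
	return 0;
}

/* After this patch each privileged handler does its own check... */
static int handle_priv(struct toy_vcpu *vcpu)
{
	if (vcpu->psw_mask & PSW_MASK_PSTATE)
		return inject_priv_op(vcpu);
	printf("emulated privileged op\n");
	return 0;
}

/* ...so unprivileged instructions can share the table untouched. */
static int handle_unpriv(struct toy_vcpu *vcpu)
{
	printf("emulated unprivileged op\n");
	return 0;
}

static const handler_t handlers[256] = {
	[0x02] = handle_priv,
	[0x8d] = handle_unpriv,		/* epsw's slot in b9_handlers */
};

static int dispatch(struct toy_vcpu *vcpu, unsigned int ipa)
{
	handler_t h = handlers[ipa & 0x00ff];

	return h ? h(vcpu) : -EOPNOTSUPP;
}

int main(void)
{
	struct toy_vcpu guest = { .psw_mask = PSW_MASK_PSTATE };

	dispatch(&guest, 0xb202);	/* problem state: program check */
	dispatch(&guest, 0xb98d);	/* epsw-like op still runs */
	return 0;
}

The payoff shows in the b9 hunk below, where handle_epsw no longer needs to be special-cased in the dispatcher.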
@@ -453,8 +490,7 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
453{ 490{
454 int reg1, reg2; 491 int reg1, reg2;
455 492
456 reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 24; 493 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
457 reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
458 494
459 /* This basically extracts the mask half of the psw. */ 495 /* This basically extracts the mask half of the psw. */
460 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000; 496 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
@@ -467,9 +503,88 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
467 return 0; 503 return 0;
468} 504}
469 505
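
The epsw change above is more than a cleanup. The open-coded extraction shifted the R1 nibble right by 24, and since the field mask is 0x00f00000 (entirely below bit 24), that expression is always 0, so reg1 was silently pinned to register 0. Switching to kvm_s390_get_regs_rre repairs this; a sketch of the extraction, assuming the helper's usual definition (its body is not shown in this diff):

#include <stdio.h>
#include <stdint.h>

/* RRE instructions keep R1 and R2 in the SIE ipb field. This mirrors
 * what kvm_s390_get_regs_rre presumably does; the removed open-coded
 * version shifted the R1 nibble by 24 instead of 20. */
static void get_regs_rre(uint32_t ipb, int *r1, int *r2)
{
	*r1 = (ipb & 0x00f00000) >> 20;
	*r2 = (ipb & 0x000f0000) >> 16;
}

int main(void)
{
	uint32_t ipb = 0x00530000;	/* encodes r1 = 5, r2 = 3 */
	int r1, r2;

	get_regs_rre(ipb, &r1, &r2);
	printf("r1=%d r2=%d\n", r1, r2);	/* 5 3 */
	printf("buggy r1=%d\n", (int)((ipb & 0x00f00000) >> 24));	/* always 0 */
	return 0;
}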
506#define PFMF_RESERVED 0xfffc0101UL
507#define PFMF_SK 0x00020000UL
508#define PFMF_CF 0x00010000UL
509#define PFMF_UI 0x00008000UL
510#define PFMF_FSC 0x00007000UL
511#define PFMF_NQ 0x00000800UL
512#define PFMF_MR 0x00000400UL
513#define PFMF_MC 0x00000200UL
514#define PFMF_KEY 0x000000feUL
515
516static int handle_pfmf(struct kvm_vcpu *vcpu)
517{
518 int reg1, reg2;
519 unsigned long start, end;
520
521 vcpu->stat.instruction_pfmf++;
522
523 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
524
525 if (!MACHINE_HAS_PFMF)
526 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
527
528 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
529 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
530
531 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
532 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
533
534 /* Only provide non-quiescing support if the host supports it */
535 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
536 S390_lowcore.stfl_fac_list & 0x00020000)
537 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
538
539 /* No support for conditional-SSKE */
540 if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
541 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
542
543 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
544 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
545 case 0x00000000:
546 end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
547 break;
548 case 0x00001000:
549 end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
550 break;
551 /* We don't support EDAT2
552 case 0x00002000:
553 end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
554 break;*/
555 default:
556 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
557 }
558 while (start < end) {
559 unsigned long useraddr;
560
561 useraddr = gmap_translate(start, vcpu->arch.gmap);
562 if (IS_ERR((void *)useraddr))
563 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
564
565 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
566 if (clear_user((void __user *)useraddr, PAGE_SIZE))
567 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
568 }
569
570 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
571 if (set_guest_storage_key(current->mm, useraddr,
572 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
573 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
574 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
575 }
576
577 start += PAGE_SIZE;
578 }
579 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
580 vcpu->run->s.regs.gprs[reg2] = end;
581 return 0;
582}
583
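
handle_pfmf screens its R1 operand before touching memory: reserved bits, the unimplemented conditional-SSKE controls (MR/MC), and any frame-size code beyond 1M (which would need EDAT2) each raise a specification exception, and only then does the handler walk the range one 4K page at a time. A user-space sketch of just that screening, with the mask constants copied from the hunk (the return values stand in for the injected exceptions):

#include <stdio.h>

#define PFMF_RESERVED	0xfffc0101UL
#define PFMF_SK		0x00020000UL
#define PFMF_CF		0x00010000UL
#define PFMF_FSC	0x00007000UL
#define PFMF_MR		0x00000400UL
#define PFMF_MC		0x00000200UL

/* Returns 0 when the R1 operand would pass the handler's checks. */
static int pfmf_check(unsigned long r1)
{
	if (r1 & PFMF_RESERVED)
		return -1;		/* specification exception */
	if (r1 & (PFMF_MR | PFMF_MC))
		return -1;		/* no conditional-SSKE support */
	switch (r1 & PFMF_FSC) {
	case 0x00000000:		/* 4K frames */
	case 0x00001000:		/* 1M frames */
		return 0;
	default:			/* 2G frames would need EDAT2 */
		return -1;
	}
}

int main(void)
{
	printf("%d\n", pfmf_check(PFMF_CF));			/* 0 */
	printf("%d\n", pfmf_check(PFMF_SK | 0x00001000));	/* 0 */
	printf("%d\n", pfmf_check(0x00002000));			/* -1 */
	return 0;
}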
470static const intercept_handler_t b9_handlers[256] = { 584static const intercept_handler_t b9_handlers[256] = {
471 [0x8d] = handle_epsw, 585 [0x8d] = handle_epsw,
472 [0x9c] = handle_io_inst, 586 [0x9c] = handle_io_inst,
587 [0xaf] = handle_pfmf,
473}; 588};
474 589
475int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) 590int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
@@ -478,29 +593,96 @@ int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
478 593
479 /* This is handled just as for the B2 instructions. */ 594 /* This is handled just as for the B2 instructions. */
480 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 595 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
481 if (handler) { 596 if (handler)
482 if ((handler != handle_epsw) && 597 return handler(vcpu);
483 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)) 598
484 return kvm_s390_inject_program_int(vcpu,
485 PGM_PRIVILEGED_OPERATION);
486 else
487 return handler(vcpu);
488 }
489 return -EOPNOTSUPP; 599 return -EOPNOTSUPP;
490} 600}
491 601
602int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
603{
604 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
605 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
606 u64 useraddr;
607 u32 val = 0;
608 int reg, rc;
609
610 vcpu->stat.instruction_lctl++;
611
612 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
613 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
614
615 useraddr = kvm_s390_get_base_disp_rs(vcpu);
616
617 if (useraddr & 3)
618 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
619
620 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
621 useraddr);
622 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
623
624 reg = reg1;
625 do {
626 rc = get_guest(vcpu, val, (u32 __user *) useraddr);
627 if (rc)
628 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
629 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
630 vcpu->arch.sie_block->gcr[reg] |= val;
631 useraddr += 4;
632 if (reg == reg3)
633 break;
634 reg = (reg + 1) % 16;
635 } while (1);
636
637 return 0;
638}
639
640static int handle_lctlg(struct kvm_vcpu *vcpu)
641{
642 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
643 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
644 u64 useraddr;
645 int reg, rc;
646
647 vcpu->stat.instruction_lctlg++;
648
649 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
650 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
651
652 useraddr = kvm_s390_get_base_disp_rsy(vcpu);
653
654 if (useraddr & 7)
655 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
656
657 reg = reg1;
658
659 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
660 useraddr);
661 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
662
663 do {
664 rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
665 (u64 __user *) useraddr);
666 if (rc)
667 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
668 useraddr += 8;
669 if (reg == reg3)
670 break;
671 reg = (reg + 1) % 16;
672 } while (1);
673
674 return 0;
675}
676
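
Both loaders share the RS-format register walk: start at r1, advance through guest memory in 4-byte (lctl) or 8-byte (lctlg) steps, and wrap from cr15 back to cr0 until r3 is reached. A minimal sketch of the wraparound, lifted straight from the do/while above:

#include <stdio.h>

int main(void)
{
	int reg1 = 13, reg3 = 2, reg = reg1;

	/* Same walk as the lctl/lctlg loops: r1..r3 with wraparound. */
	do {
		printf("load cr%d\n", reg);
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

With reg1 = 13 and reg3 = 2 this touches cr13, cr14, cr15, cr0, cr1, cr2, which is the range semantics the instruction defines.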
492static const intercept_handler_t eb_handlers[256] = { 677static const intercept_handler_t eb_handlers[256] = {
678 [0x2f] = handle_lctlg,
493 [0x8a] = handle_io_inst, 679 [0x8a] = handle_io_inst,
494}; 680};
495 681
496int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu) 682int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
497{ 683{
498 intercept_handler_t handler; 684 intercept_handler_t handler;
499 685
500 /* All eb instructions that end up here are privileged. */
501 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
502 return kvm_s390_inject_program_int(vcpu,
503 PGM_PRIVILEGED_OPERATION);
504 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; 686 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
505 if (handler) 687 if (handler)
506 return handler(vcpu); 688 return handler(vcpu);
@@ -515,6 +697,9 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
515 697
516 vcpu->stat.instruction_tprot++; 698 vcpu->stat.instruction_tprot++;
517 699
700 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
701 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
702
518 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); 703 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
519 704
520 /* we only handle the Linux memory detection case: 705 /* we only handle the Linux memory detection case:
@@ -560,8 +745,7 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
560 u32 value; 745 u32 value;
561 746
562 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 747 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
563 return kvm_s390_inject_program_int(vcpu, 748 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
564 PGM_PRIVILEGED_OPERATION);
565 749
566 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) 750 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
567 return kvm_s390_inject_program_int(vcpu, 751 return kvm_s390_inject_program_int(vcpu,
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 1c48ab2845e0..bec398c57acf 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -79,8 +79,8 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
79 list_add_tail(&inti->list, &li->list); 79 list_add_tail(&inti->list, &li->list);
80 atomic_set(&li->active, 1); 80 atomic_set(&li->active, 1);
81 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 81 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
82 if (waitqueue_active(&li->wq)) 82 if (waitqueue_active(li->wq))
83 wake_up_interruptible(&li->wq); 83 wake_up_interruptible(li->wq);
84 spin_unlock_bh(&li->lock); 84 spin_unlock_bh(&li->lock);
85 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 85 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
86 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); 86 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
@@ -117,8 +117,8 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
117 list_add_tail(&inti->list, &li->list); 117 list_add_tail(&inti->list, &li->list);
118 atomic_set(&li->active, 1); 118 atomic_set(&li->active, 1);
119 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 119 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
120 if (waitqueue_active(&li->wq)) 120 if (waitqueue_active(li->wq))
121 wake_up_interruptible(&li->wq); 121 wake_up_interruptible(li->wq);
122 spin_unlock_bh(&li->lock); 122 spin_unlock_bh(&li->lock);
123 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 123 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
124 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); 124 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
@@ -145,8 +145,8 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
145 atomic_set(&li->active, 1); 145 atomic_set(&li->active, 1);
146 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); 146 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
147 li->action_bits |= action; 147 li->action_bits |= action;
148 if (waitqueue_active(&li->wq)) 148 if (waitqueue_active(li->wq))
149 wake_up_interruptible(&li->wq); 149 wake_up_interruptible(li->wq);
150out: 150out:
151 spin_unlock_bh(&li->lock); 151 spin_unlock_bh(&li->lock);
152 152
@@ -250,8 +250,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
250 250
251 list_add_tail(&inti->list, &li->list); 251 list_add_tail(&inti->list, &li->list);
252 atomic_set(&li->active, 1); 252 atomic_set(&li->active, 1);
253 if (waitqueue_active(&li->wq)) 253 if (waitqueue_active(li->wq))
254 wake_up_interruptible(&li->wq); 254 wake_up_interruptible(li->wq);
255 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 255 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
256 256
257 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); 257 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
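
All four sigp hunks make the same mechanical change: the address-of operator disappears from the li->wq call sites. That implies the field changed from an embedded wait_queue_head_t to a pointer (presumably aimed at the vcpu's own wait queue) somewhere else in this series; the struct definition is not part of this diff, so treat that as an inference. A toy illustration of why the '&' goes away:

#include <stdio.h>

struct waitq { int dummy; };

struct li_embedded { struct waitq wq; };	/* old shape (inferred) */
struct li_pointer  { struct waitq *wq; };	/* new shape (inferred) */

static void wake(struct waitq *wq) { printf("wake %p\n", (void *)wq); }

int main(void)
{
	struct waitq shared = { 0 };
	struct li_embedded a = { { 0 } };
	struct li_pointer b = { .wq = &shared };

	wake(&a.wq);	/* old call sites: wake_up_interruptible(&li->wq) */
	wake(b.wq);	/* new call sites: wake_up_interruptible(li->wq) */
	return 0;
}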
@@ -333,8 +333,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
333 333
334 /* sigp in userspace can exit */ 334 /* sigp in userspace can exit */
335 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 335 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
336 return kvm_s390_inject_program_int(vcpu, 336 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
337 PGM_PRIVILEGED_OPERATION);
338 337
339 order_code = kvm_s390_get_base_disp_rs(vcpu); 338 order_code = kvm_s390_get_base_disp_rs(vcpu);
340 339
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 74c29d922458..17bf4d3d303a 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -689,7 +689,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
689 entry = *ptep; 689 entry = *ptep;
690 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) { 690 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
691 pgste = pgste_get_lock(ptep); 691 pgste = pgste_get_lock(ptep);
692 pgste_val(pgste) |= RCP_IN_BIT; 692 pgste_val(pgste) |= PGSTE_IN_BIT;
693 pgste_set_unlock(ptep, pgste); 693 pgste_set_unlock(ptep, pgste);
694 start += PAGE_SIZE; 694 start += PAGE_SIZE;
695 len -= PAGE_SIZE; 695 len -= PAGE_SIZE;