Diffstat (limited to 'arch/s390')
28 files changed, 3630 insertions, 629 deletions
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 4e63f1a13600..31ab9f346d7e 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -57,6 +57,20 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
 void smp_ctl_set_bit(int cr, int bit);
 void smp_ctl_clear_bit(int cr, int bit);
 
+union ctlreg0 {
+	unsigned long val;
+	struct {
+#ifdef CONFIG_64BIT
+		unsigned long : 32;
+#endif
+		unsigned long : 3;
+		unsigned long lap : 1; /* Low-address-protection control */
+		unsigned long : 4;
+		unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+		unsigned long : 23;
+	};
+};
+
 #ifdef CONFIG_SMP
 # define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
 # define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
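The new union ctlreg0 gives CR0 bits symbolic names instead of open-coded masks. A minimal sketch of the intended use, assuming a KVM call site where gcr[0] holds the guest's control register 0 (the call site itself is not part of this hunk):

	union ctlreg0 reg0;

	reg0.val = vcpu->arch.sie_block->gcr[0];	/* guest CR0 */
	if (reg0.lap)
		/* guest runs with low-address protection enabled */
		rc = check_low_address_prot(vcpu, ga);	/* hypothetical helper */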
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 154b60089be9..4181d7baabba 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -32,16 +32,26 @@
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 4096
 
+#define SIGP_CTRL_C 0x00800000
+
 struct sca_entry {
-	atomic_t scn;
+	atomic_t ctrl;
 	__u32 reserved;
 	__u64 sda;
 	__u64 reserved2[2];
 } __attribute__((packed));
 
+union ipte_control {
+	unsigned long val;
+	struct {
+		unsigned long k : 1;
+		unsigned long kh : 31;
+		unsigned long kg : 32;
+	};
+};
 
 struct sca_block {
-	__u64 ipte_control;
+	union ipte_control ipte_control;
 	__u64 reserved[5];
 	__u64 mcn;
 	__u64 reserved2;
@@ -64,6 +74,7 @@ struct sca_block {
 #define CPUSTAT_ZARCH 0x00000800
 #define CPUSTAT_MCDS 0x00000100
 #define CPUSTAT_SM 0x00000080
+#define CPUSTAT_IBS 0x00000040
 #define CPUSTAT_G 0x00000008
 #define CPUSTAT_GED 0x00000004
 #define CPUSTAT_J 0x00000002
@@ -71,7 +82,9 @@ struct sca_block {
 
 struct kvm_s390_sie_block {
 	atomic_t cpuflags;	/* 0x0000 */
-	__u32 prefix;		/* 0x0004 */
+	__u32 : 1;		/* 0x0004 */
+	__u32 prefix : 18;
+	__u32 : 13;
 	__u8 reserved08[4];	/* 0x0008 */
 #define PROG_IN_SIE (1<<0)
 	__u32 prog0c;		/* 0x000c */
@@ -85,12 +98,27 @@ struct kvm_s390_sie_block {
 	__u8 reserved40[4];	/* 0x0040 */
 #define LCTL_CR0 0x8000
 #define LCTL_CR6 0x0200
+#define LCTL_CR9 0x0040
+#define LCTL_CR10 0x0020
+#define LCTL_CR11 0x0010
 #define LCTL_CR14 0x0002
 	__u16 lctl;		/* 0x0044 */
 	__s16 icpua;		/* 0x0046 */
-#define ICTL_LPSW 0x00400000
+#define ICTL_PINT 0x20000000
+#define ICTL_LPSW 0x00400000
+#define ICTL_STCTL 0x00040000
+#define ICTL_ISKE 0x00004000
+#define ICTL_SSKE 0x00002000
+#define ICTL_RRBE 0x00001000
+#define ICTL_TPROT 0x00000200
 	__u32 ictl;		/* 0x0048 */
 	__u32 eca;		/* 0x004c */
+#define ICPT_INST 0x04
+#define ICPT_PROGI 0x08
+#define ICPT_INSTPROGI 0x0C
+#define ICPT_OPEREXC 0x2C
+#define ICPT_PARTEXEC 0x38
+#define ICPT_IOINST 0x40
 	__u8 icptcode;		/* 0x0050 */
 	__u8 reserved51;	/* 0x0051 */
 	__u16 ihcpu;		/* 0x0052 */
@@ -109,9 +137,24 @@ struct kvm_s390_sie_block {
 	psw_t gpsw;		/* 0x0090 */
 	__u64 gg14;		/* 0x00a0 */
 	__u64 gg15;		/* 0x00a8 */
-	__u8 reservedb0[30];	/* 0x00b0 */
-	__u16 iprcc;		/* 0x00ce */
-	__u8 reservedd0[48];	/* 0x00d0 */
+	__u8 reservedb0[20];	/* 0x00b0 */
+	__u16 extcpuaddr;	/* 0x00c4 */
+	__u16 eic;		/* 0x00c6 */
+	__u32 reservedc8;	/* 0x00c8 */
+	__u16 pgmilc;		/* 0x00cc */
+	__u16 iprcc;		/* 0x00ce */
+	__u32 dxc;		/* 0x00d0 */
+	__u16 mcn;		/* 0x00d4 */
+	__u8 perc;		/* 0x00d6 */
+	__u8 peratmid;		/* 0x00d7 */
+	__u64 peraddr;		/* 0x00d8 */
+	__u8 eai;		/* 0x00e0 */
+	__u8 peraid;		/* 0x00e1 */
+	__u8 oai;		/* 0x00e2 */
+	__u8 armid;		/* 0x00e3 */
+	__u8 reservede4[4];	/* 0x00e4 */
+	__u64 tecmc;		/* 0x00e8 */
+	__u8 reservedf0[16];	/* 0x00f0 */
 	__u64 gcr[16];		/* 0x0100 */
 	__u64 gbea;		/* 0x0180 */
 	__u8 reserved188[24];	/* 0x0188 */
@@ -146,6 +189,8 @@ struct kvm_vcpu_stat {
 	u32 exit_instruction;
 	u32 instruction_lctl;
 	u32 instruction_lctlg;
+	u32 instruction_stctl;
+	u32 instruction_stctg;
 	u32 exit_program_interruption;
 	u32 exit_instr_and_program;
 	u32 deliver_external_call;
@@ -164,6 +209,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_stpx;
 	u32 instruction_stap;
 	u32 instruction_storage_key;
+	u32 instruction_ipte_interlock;
 	u32 instruction_stsch;
 	u32 instruction_chsc;
 	u32 instruction_stsi;
@@ -183,13 +229,58 @@
 	u32 diagnose_9c;
 };
 
 #define PGM_OPERATION 0x01
 #define PGM_PRIVILEGED_OP 0x02
 #define PGM_EXECUTE 0x03
 #define PGM_PROTECTION 0x04
 #define PGM_ADDRESSING 0x05
 #define PGM_SPECIFICATION 0x06
 #define PGM_DATA 0x07
+#define PGM_FIXED_POINT_OVERFLOW 0x08
+#define PGM_FIXED_POINT_DIVIDE 0x09
+#define PGM_DECIMAL_OVERFLOW 0x0a
+#define PGM_DECIMAL_DIVIDE 0x0b
+#define PGM_HFP_EXPONENT_OVERFLOW 0x0c
+#define PGM_HFP_EXPONENT_UNDERFLOW 0x0d
+#define PGM_HFP_SIGNIFICANCE 0x0e
+#define PGM_HFP_DIVIDE 0x0f
+#define PGM_SEGMENT_TRANSLATION 0x10
+#define PGM_PAGE_TRANSLATION 0x11
+#define PGM_TRANSLATION_SPEC 0x12
+#define PGM_SPECIAL_OPERATION 0x13
+#define PGM_OPERAND 0x15
+#define PGM_TRACE_TABEL 0x16
+#define PGM_SPACE_SWITCH 0x1c
+#define PGM_HFP_SQUARE_ROOT 0x1d
+#define PGM_PC_TRANSLATION_SPEC 0x1f
+#define PGM_AFX_TRANSLATION 0x20
+#define PGM_ASX_TRANSLATION 0x21
+#define PGM_LX_TRANSLATION 0x22
+#define PGM_EX_TRANSLATION 0x23
+#define PGM_PRIMARY_AUTHORITY 0x24
+#define PGM_SECONDARY_AUTHORITY 0x25
+#define PGM_LFX_TRANSLATION 0x26
+#define PGM_LSX_TRANSLATION 0x27
+#define PGM_ALET_SPECIFICATION 0x28
+#define PGM_ALEN_TRANSLATION 0x29
+#define PGM_ALE_SEQUENCE 0x2a
+#define PGM_ASTE_VALIDITY 0x2b
+#define PGM_ASTE_SEQUENCE 0x2c
+#define PGM_EXTENDED_AUTHORITY 0x2d
+#define PGM_LSTE_SEQUENCE 0x2e
+#define PGM_ASTE_INSTANCE 0x2f
+#define PGM_STACK_FULL 0x30
+#define PGM_STACK_EMPTY 0x31
+#define PGM_STACK_SPECIFICATION 0x32
+#define PGM_STACK_TYPE 0x33
+#define PGM_STACK_OPERATION 0x34
+#define PGM_ASCE_TYPE 0x38
+#define PGM_REGION_FIRST_TRANS 0x39
+#define PGM_REGION_SECOND_TRANS 0x3a
+#define PGM_REGION_THIRD_TRANS 0x3b
+#define PGM_MONITOR 0x40
+#define PGM_PER 0x80
+#define PGM_CRYPTO_OPERATION 0x119
 
 struct kvm_s390_interrupt_info {
 	struct list_head list;
@@ -229,6 +320,45 @@ struct kvm_s390_float_interrupt {
 	unsigned int irq_count;
 };
 
+struct kvm_hw_wp_info_arch {
+	unsigned long addr;
+	unsigned long phys_addr;
+	int len;
+	char *old_data;
+};
+
+struct kvm_hw_bp_info_arch {
+	unsigned long addr;
+	int len;
+};
+
+/*
+ * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
+ * Further KVM_GUESTDBG flags which can be used from userspace can be found in
+ * arch/s390/include/uapi/asm/kvm.h
+ */
+#define KVM_GUESTDBG_EXIT_PENDING 0x10000000
+
+#define guestdbg_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
+#define guestdbg_sstep_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+#define guestdbg_hw_bp_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
+		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
+
+struct kvm_guestdbg_info_arch {
+	unsigned long cr0;
+	unsigned long cr9;
+	unsigned long cr10;
+	unsigned long cr11;
+	struct kvm_hw_bp_info_arch *hw_bp_info;
+	struct kvm_hw_wp_info_arch *hw_wp_info;
+	int nr_hw_bp;
+	int nr_hw_wp;
+	unsigned long last_bp;
+};
 
 struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
@@ -238,11 +368,13 @@ struct kvm_vcpu_arch {
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer ckc_timer;
 	struct tasklet_struct tasklet;
+	struct kvm_s390_pgm_info pgm;
 	union {
 		struct cpuid cpu_id;
 		u64 stidp_data;
 	};
 	struct gmap *gmap;
+	struct kvm_guestdbg_info_arch guestdbg;
 #define KVM_S390_PFAULT_TOKEN_INVALID (-1UL)
 	unsigned long pfault_token;
 	unsigned long pfault_select;
@@ -285,7 +417,10 @@ struct kvm_arch{
 	struct gmap *gmap;
 	int css_support;
 	int use_irqchip;
+	int use_cmma;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
+	wait_queue_head_t ipte_wq;
+	spinlock_t start_stop_lock;
 };
 
 #define KVM_HVA_ERR_BAD (-1UL)
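Note that prefix is now an 18-bit field holding the 8K-aligned prefix address shifted down, so callers can no longer read sie_block->prefix raw; the diag.c hunk below already uses a kvm_s390_get_prefix() helper. A plausible sketch of that accessor (the shift constant is an assumption here):

	#define GUEST_PREFIX_SHIFT 13	/* prefix addresses are 8K aligned */

	static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
	}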
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 2070cad80e9e..4349197ab9df 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -56,13 +56,14 @@ struct _lowcore {
 	__u16 pgm_code;			/* 0x008e */
 	__u32 trans_exc_code;		/* 0x0090 */
 	__u16 mon_class_num;		/* 0x0094 */
-	__u16 per_perc_atmid;		/* 0x0096 */
+	__u8 per_code;			/* 0x0096 */
+	__u8 per_atmid;			/* 0x0097 */
 	__u32 per_address;		/* 0x0098 */
 	__u32 monitor_code;		/* 0x009c */
 	__u8 exc_access_id;		/* 0x00a0 */
 	__u8 per_access_id;		/* 0x00a1 */
 	__u8 op_access_id;		/* 0x00a2 */
-	__u8 ar_access_id;		/* 0x00a3 */
+	__u8 ar_mode_id;		/* 0x00a3 */
 	__u8 pad_0x00a4[0x00b8-0x00a4];	/* 0x00a4 */
 	__u16 subchannel_id;		/* 0x00b8 */
 	__u16 subchannel_nr;		/* 0x00ba */
@@ -195,12 +196,13 @@ struct _lowcore {
 	__u16 pgm_code;			/* 0x008e */
 	__u32 data_exc_code;		/* 0x0090 */
 	__u16 mon_class_num;		/* 0x0094 */
-	__u16 per_perc_atmid;		/* 0x0096 */
+	__u8 per_code;			/* 0x0096 */
+	__u8 per_atmid;			/* 0x0097 */
 	__u64 per_address;		/* 0x0098 */
 	__u8 exc_access_id;		/* 0x00a0 */
 	__u8 per_access_id;		/* 0x00a1 */
 	__u8 op_access_id;		/* 0x00a2 */
-	__u8 ar_access_id;		/* 0x00a3 */
+	__u8 ar_mode_id;		/* 0x00a3 */
 	__u8 pad_0x00a4[0x00a8-0x00a4];	/* 0x00a4 */
 	__u64 trans_exc_code;		/* 0x00a8 */
 	__u64 monitor_code;		/* 0x00b0 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f77695a82f64..a5e656260a70 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -16,6 +16,8 @@ typedef struct {
 	unsigned long vdso_base;
 	/* The mmu context has extended page tables. */
 	unsigned int has_pgste:1;
+	/* The mmu context uses storage keys. */
+	unsigned int use_skey:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 056d7eff2a16..c28f32a45af5 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -23,6 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
 	mm->context.has_pgste = 0;
+	mm->context.use_skey = 0;
 	mm->context.asce_limit = STACK_TOP_MAX;
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 884017cbfa9f..9e18a61d3df3 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,7 +22,8 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
 
-void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long);
+void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
+			    bool init_skey);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq);
 
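The new init_skey flag lets page_table_reset_pgste() initialize storage keys while it walks the range. The s390_enable_skey() declaration added to pgtable.h below presumably builds on exactly this; a sketch under that assumption (where use_skey gets set is also an assumption):

	void s390_enable_skey(void)
	{
		/* flip the mm to storage-key mode, reinitialize all PGSTEs */
		current->mm->context.use_skey = 1;	/* assumed location */
		page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
	}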
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 12f75313e086..fcba5e03839f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -309,7 +309,8 @@ extern unsigned long MODULES_END;
 #define PGSTE_HC_BIT 0x00200000UL
 #define PGSTE_GR_BIT 0x00040000UL
 #define PGSTE_GC_BIT 0x00020000UL
-#define PGSTE_IN_BIT 0x00008000UL /* IPTE notify bit */
+#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */
+#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */
 
 #else /* CONFIG_64BIT */
 
@@ -391,7 +392,8 @@ extern unsigned long MODULES_END;
 #define PGSTE_HC_BIT 0x0020000000000000UL
 #define PGSTE_GR_BIT 0x0004000000000000UL
 #define PGSTE_GC_BIT 0x0002000000000000UL
-#define PGSTE_IN_BIT 0x0000800000000000UL /* IPTE notify bit */
+#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
+#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
 
 #endif /* CONFIG_64BIT */
 
@@ -466,6 +468,16 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 #endif
 	return 0;
 }
+
+static inline int mm_use_skey(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+	if (mm->context.use_skey)
+		return 1;
+#endif
+	return 0;
+}
+
 /*
  * pgd/pmd/pte query functions
  */
@@ -699,26 +711,17 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 #endif
 }
 
-static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
+static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
+				       struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address, bits, skey;
 
-	if (pte_val(*ptep) & _PAGE_INVALID)
+	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
 		return pgste;
 	address = pte_val(*ptep) & PAGE_MASK;
 	skey = (unsigned long) page_get_storage_key(address);
 	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
-	if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) {
-		/* Transfer dirty + referenced bit to host bits in pgste */
-		pgste_val(pgste) |= bits << 52;
-		page_set_storage_key(address, skey ^ bits, 0);
-	} else if (!(pgste_val(pgste) & PGSTE_HR_BIT) &&
-		   (bits & _PAGE_REFERENCED)) {
-		/* Transfer referenced bit to host bit in pgste */
-		pgste_val(pgste) |= PGSTE_HR_BIT;
-		page_reset_referenced(address);
-	}
 	/* Transfer page changed & referenced bit to guest bits in pgste */
 	pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */
 	/* Copy page access key and fetch protection bit to pgste */
@@ -729,25 +732,14 @@
 
 }
 
-static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
-	if (pte_val(*ptep) & _PAGE_INVALID)
-		return pgste;
-	/* Get referenced bit from storage key */
-	if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK))
-		pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT;
-#endif
-	return pgste;
-}
-
-static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
+static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
+				 struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address;
 	unsigned long nkey;
 
-	if (pte_val(entry) & _PAGE_INVALID)
+	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
 		return;
 	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 	address = pte_val(entry) & PAGE_MASK;
@@ -757,23 +749,30 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 	 * key C/R to 0.
 	 */
 	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 	page_set_storage_key(address, nkey, 0);
 #endif
 }
 
-static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
+static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
-	if (!MACHINE_HAS_ESOP &&
-	    (pte_val(entry) & _PAGE_PRESENT) &&
-	    (pte_val(entry) & _PAGE_WRITE)) {
-		/*
-		 * Without enhanced suppression-on-protection force
-		 * the dirty bit on for all writable ptes.
-		 */
-		pte_val(entry) |= _PAGE_DIRTY;
-		pte_val(entry) &= ~_PAGE_PROTECT;
+	if ((pte_val(entry) & _PAGE_PRESENT) &&
+	    (pte_val(entry) & _PAGE_WRITE) &&
+	    !(pte_val(entry) & _PAGE_INVALID)) {
+		if (!MACHINE_HAS_ESOP) {
+			/*
+			 * Without enhanced suppression-on-protection force
+			 * the dirty bit on for all writable ptes.
+			 */
+			pte_val(entry) |= _PAGE_DIRTY;
+			pte_val(entry) &= ~_PAGE_PROTECT;
+		}
+		if (!(pte_val(entry) & _PAGE_PROTECT))
+			/* This pte allows write access, set user-dirty */
+			pgste_val(pgste) |= PGSTE_UC_BIT;
 	}
 	*ptep = entry;
+	return pgste;
 }
 
 /**
@@ -839,6 +838,8 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
 void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
 void __gmap_zap(unsigned long address, struct gmap *);
+bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
+
 
 void gmap_register_ipte_notifier(struct gmap_notifier *);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
@@ -870,8 +871,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
-		pgste_set_key(ptep, pgste, entry);
-		pgste_set_pte(ptep, entry);
+		pgste_set_key(ptep, pgste, entry, mm);
+		pgste = pgste_set_pte(ptep, pgste, entry);
 		pgste_set_unlock(ptep, pgste);
 	} else {
 		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
@@ -1017,45 +1018,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
 }
 #endif
 
-/*
- * Get (and clear) the user dirty bit for a pte.
- */
-static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
-						 pte_t *ptep)
-{
-	pgste_t pgste;
-	int dirty = 0;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_update_all(ptep, pgste);
-		dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT);
-		pgste_val(pgste) &= ~PGSTE_HC_BIT;
-		pgste_set_unlock(ptep, pgste);
-		return dirty;
-	}
-	return dirty;
-}
-
-/*
- * Get (and clear) the user referenced bit for a pte.
- */
-static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
-						 pte_t *ptep)
-{
-	pgste_t pgste;
-	int young = 0;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_update_young(ptep, pgste);
-		young = !!(pgste_val(pgste) & PGSTE_HR_BIT);
-		pgste_val(pgste) &= ~PGSTE_HR_BIT;
-		pgste_set_unlock(ptep, pgste);
-	}
-	return young;
-}
-
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
 	unsigned long pto = (unsigned long) ptep;
@@ -1118,6 +1080,36 @@ static inline void ptep_flush_lazy(struct mm_struct *mm,
 	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
+/*
+ * Get (and clear) the user dirty bit for a pte.
+ */
+static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
+						 unsigned long addr,
+						 pte_t *ptep)
+{
+	pgste_t pgste;
+	pte_t pte;
+	int dirty;
+
+	if (!mm_has_pgste(mm))
+		return 0;
+	pgste = pgste_get_lock(ptep);
+	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
+	pgste_val(pgste) &= ~PGSTE_UC_BIT;
+	pte = *ptep;
+	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
+		pgste = pgste_ipte_notify(mm, ptep, pgste);
+		__ptep_ipte(addr, ptep);
+		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
+			pte_val(pte) |= _PAGE_PROTECT;
+		else
+			pte_val(pte) |= _PAGE_INVALID;
+		*ptep = pte;
+	}
+	pgste_set_unlock(ptep, pgste);
+	return dirty;
+}
+
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)
@@ -1137,7 +1129,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	pte = pte_mkold(pte);
 
 	if (mm_has_pgste(vma->vm_mm)) {
-		pgste_set_pte(ptep, pte);
+		pgste = pgste_set_pte(ptep, pgste, pte);
 		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = pte;
@@ -1182,7 +1174,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste);
+		pgste = pgste_update_all(&pte, pgste, mm);
 		pgste_set_unlock(ptep, pgste);
 	}
 	return pte;
@@ -1205,7 +1197,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 	ptep_flush_lazy(mm, address, ptep);
 
 	if (mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste);
+		pgste = pgste_update_all(&pte, pgste, mm);
 		pgste_set(ptep, pgste);
 	}
 	return pte;
@@ -1219,8 +1211,8 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get(ptep);
-		pgste_set_key(ptep, pgste, pte);
-		pgste_set_pte(ptep, pte);
+		pgste_set_key(ptep, pgste, pte, mm);
+		pgste = pgste_set_pte(ptep, pgste, pte);
 		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = pte;
@@ -1246,7 +1238,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
 		    _PGSTE_GPS_USAGE_UNUSED)
 			pte_val(pte) |= _PAGE_UNUSED;
-		pgste = pgste_update_all(&pte, pgste);
+		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
 		pgste_set_unlock(ptep, pgste);
 	}
 	return pte;
@@ -1278,7 +1270,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (!full && mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste);
+		pgste = pgste_update_all(&pte, pgste, mm);
 		pgste_set_unlock(ptep, pgste);
 	}
 	return pte;
@@ -1301,7 +1293,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	pte = pte_wrprotect(pte);
 
 	if (mm_has_pgste(mm)) {
-		pgste_set_pte(ptep, pte);
+		pgste = pgste_set_pte(ptep, pgste, pte);
 		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = pte;
@@ -1326,7 +1318,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 	ptep_flush_direct(vma->vm_mm, address, ptep);
 
 	if (mm_has_pgste(vma->vm_mm)) {
-		pgste_set_pte(ptep, entry);
+		pgste = pgste_set_pte(ptep, pgste, entry);
 		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = entry;
@@ -1734,6 +1726,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 extern int vmem_add_mapping(unsigned long start, unsigned long size);
 extern int vmem_remove_mapping(unsigned long start, unsigned long size);
 extern int s390_enable_sie(void);
+extern void s390_enable_skey(void);
 
 /*
  * No page table caches to initialise
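gmap_test_and_clear_dirty() is the guest-facing consumer of the new PGSTE_UC_BIT tracking; its body lives in arch/s390/mm/pgtable.c and is not part of this diff. A sketch of what it plausibly does, assuming the caller passes the userspace address backing the guest page:

	bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
	{
		pte_t *pte;
		spinlock_t *ptl;
		bool dirty = false;

		pte = get_locked_pte(gmap->mm, address, &ptl);
		if (unlikely(!pte))
			return false;

		/* transfers and clears the UC bit under the pgste lock */
		if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
			dirty = true;

		spin_unlock(ptl);
		return dirty;
	}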
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 1b5300cd6d22..55d69dd7473c 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -22,6 +22,50 @@
 	 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
 	 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
 
+struct psw_bits {
+	unsigned long long : 1;
+	unsigned long long r : 1;	/* PER-Mask */
+	unsigned long long : 3;
+	unsigned long long t : 1;	/* DAT Mode */
+	unsigned long long i : 1;	/* Input/Output Mask */
+	unsigned long long e : 1;	/* External Mask */
+	unsigned long long key : 4;	/* PSW Key */
+	unsigned long long : 1;
+	unsigned long long m : 1;	/* Machine-Check Mask */
+	unsigned long long w : 1;	/* Wait State */
+	unsigned long long p : 1;	/* Problem State */
+	unsigned long long as : 2;	/* Address Space Control */
+	unsigned long long cc : 2;	/* Condition Code */
+	unsigned long long pm : 4;	/* Program Mask */
+	unsigned long long ri : 1;	/* Runtime Instrumentation */
+	unsigned long long : 6;
+	unsigned long long eaba : 2;	/* Addressing Mode */
+#ifdef CONFIG_64BIT
+	unsigned long long : 31;
+	unsigned long long ia : 64;	/* Instruction Address */
+#else
+	unsigned long long ia : 31;	/* Instruction Address */
+#endif
+};
+
+enum {
+	PSW_AMODE_24BIT = 0,
+	PSW_AMODE_31BIT = 1,
+	PSW_AMODE_64BIT = 3
+};
+
+enum {
+	PSW_AS_PRIMARY = 0,
+	PSW_AS_ACCREG = 1,
+	PSW_AS_SECONDARY = 2,
+	PSW_AS_HOME = 3
+};
+
+#define psw_bits(__psw) (*({			\
+	typecheck(psw_t, __psw);		\
+	&(*(struct psw_bits *)(&(__psw)));	\
+}))
+
 /*
  * The pt_regs struct defines the way the registers are stored on
  * the stack during a system call.
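psw_bits() reinterprets a psw_t in place, so PSW fields can be read and written by name rather than with mask-and-shift. Two minimal usage sketches (the wrapper functions are hypothetical; gpsw is the guest PSW in the SIE block changed above):

	static int guest_dat_enabled(struct kvm_vcpu *vcpu)
	{
		/* read the DAT bit by name instead of testing a mask */
		return psw_bits(vcpu->arch.sie_block->gpsw).t;
	}

	static void guest_force_amode64(struct kvm_vcpu *vcpu)
	{
		psw_bits(vcpu->arch.sie_block->gpsw).eaba = PSW_AMODE_64BIT;
	}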
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2f5e9932b4de..1aba89b53cb9 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -28,7 +28,11 @@ struct sclp_ipl_info {
 
 struct sclp_cpu_entry {
 	u8 address;
-	u8 reserved0[13];
+	u8 reserved0[2];
+	u8 : 3;
+	u8 siif : 1;
+	u8 : 4;
+	u8 reserved2[10];
 	u8 type;
 	u8 reserved1;
 } __attribute__((packed));
@@ -61,5 +65,7 @@ int sclp_pci_deconfigure(u32 fid);
 int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 unsigned long sclp_get_hsa_size(void);
 void sclp_early_detect(void);
+int sclp_has_siif(void);
+unsigned int sclp_get_ibc(void);
 
 #endif /* _ASM_S390_SCLP_H */
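sclp_has_siif() presumably reports the siif bit read from the boot CPU's sclp_cpu_entry during early SCLP detection; the implementation belongs to drivers/s390/char/sclp_early.c and is not in this diff. A sketch under that assumption (helper name is made up):

	static unsigned int sclp_siif;

	static void __init sclp_detect_siif(struct sclp_cpu_entry *e,
					    u16 boot_cpu_address)
	{
		if (e->address == boot_cpu_address)
			sclp_siif = e->siif;
	}

	int sclp_has_siif(void)
	{
		return sclp_siif;
	}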
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index c003c6a73b1e..0fc26430a1e5 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #define __KVM_S390
+#define __KVM_HAVE_GUEST_DEBUG
 
 /* Device control API: s390-specific devices */
 #define KVM_DEV_FLIC_GET_ALL_IRQS 1
@@ -54,6 +55,13 @@ struct kvm_s390_io_adapter_req {
 	__u64 addr;
 };
 
+/* kvm attr_group on vm fd */
+#define KVM_S390_VM_MEM_CTRL 0
+
+/* kvm attributes for mem_ctrl */
+#define KVM_S390_VM_MEM_ENABLE_CMMA 0
+#define KVM_S390_VM_MEM_CLR_CMMA 1
+
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
 	/* general purpose regs for s390 */
@@ -72,11 +80,31 @@ struct kvm_fpu {
 	__u64 fprs[16];
 };
 
+#define KVM_GUESTDBG_USE_HW_BP 0x00010000
+
+#define KVM_HW_BP 1
+#define KVM_HW_WP_WRITE 2
+#define KVM_SINGLESTEP 4
+
 struct kvm_debug_exit_arch {
+	__u64 addr;
+	__u8 type;
+	__u8 pad[7]; /* Should be set to 0 */
+};
+
+struct kvm_hw_breakpoint {
+	__u64 addr;
+	__u64 phys_addr;
+	__u64 len;
+	__u8 type;
+	__u8 pad[7]; /* Should be set to 0 */
 };
 
 /* for KVM_SET_GUEST_DEBUG */
 struct kvm_guest_debug_arch {
+	__u32 nr_hw_bp;
+	__u32 pad; /* Should be set to 0 */
+	struct kvm_hw_breakpoint __user *hw_bp;
 };
 
 #define KVM_SYNC_PREFIX (1UL << 0)
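With these additions a userspace VMM can arm s390 hardware breakpoints through the generic KVM_SET_GUEST_DEBUG ioctl. A sketch of the call (error handling omitted; the wrapper function is made up):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int set_hw_bp(int vcpu_fd, __u64 addr)
	{
		struct kvm_hw_breakpoint bp = {
			.addr = addr,
			.len  = 1,
			.type = KVM_HW_BP,
		};
		struct kvm_guest_debug dbg = {
			.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
		};

		dbg.arch.nr_hw_bp = 1;
		dbg.arch.hw_bp = &bp;
		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	}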
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
new file mode 100644
index 000000000000..3d97f610198d
--- /dev/null
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -0,0 +1,245 @@
+#ifndef _UAPI_ASM_S390_SIE_H
+#define _UAPI_ASM_S390_SIE_H
+
+#include <asm/sigp.h>
+
+#define diagnose_codes \
+	{ 0x10, "DIAG (0x10) release pages" }, \
+	{ 0x44, "DIAG (0x44) time slice end" }, \
+	{ 0x9c, "DIAG (0x9c) time slice end directed" }, \
+	{ 0x204, "DIAG (0x204) logical-cpu utilization" }, \
+	{ 0x258, "DIAG (0x258) page-reference services" }, \
+	{ 0x308, "DIAG (0x308) ipl functions" }, \
+	{ 0x500, "DIAG (0x500) KVM virtio functions" }, \
+	{ 0x501, "DIAG (0x501) KVM breakpoint" }
+
+#define sigp_order_codes \
+	{ SIGP_SENSE, "SIGP sense" }, \
+	{ SIGP_EXTERNAL_CALL, "SIGP external call" }, \
+	{ SIGP_EMERGENCY_SIGNAL, "SIGP emergency signal" }, \
+	{ SIGP_STOP, "SIGP stop" }, \
+	{ SIGP_STOP_AND_STORE_STATUS, "SIGP stop and store status" }, \
+	{ SIGP_SET_ARCHITECTURE, "SIGP set architecture" }, \
+	{ SIGP_SET_PREFIX, "SIGP set prefix" }, \
+	{ SIGP_SENSE_RUNNING, "SIGP sense running" }, \
+	{ SIGP_RESTART, "SIGP restart" }, \
+	{ SIGP_INITIAL_CPU_RESET, "SIGP initial cpu reset" }, \
+	{ SIGP_STORE_STATUS_AT_ADDRESS, "SIGP store status at address" }
+
+#define icpt_prog_codes \
+	{ 0x0001, "Prog Operation" }, \
+	{ 0x0002, "Prog Privileged Operation" }, \
+	{ 0x0003, "Prog Execute" }, \
+	{ 0x0004, "Prog Protection" }, \
+	{ 0x0005, "Prog Addressing" }, \
+	{ 0x0006, "Prog Specification" }, \
+	{ 0x0007, "Prog Data" }, \
+	{ 0x0008, "Prog Fixedpoint overflow" }, \
+	{ 0x0009, "Prog Fixedpoint divide" }, \
+	{ 0x000A, "Prog Decimal overflow" }, \
+	{ 0x000B, "Prog Decimal divide" }, \
+	{ 0x000C, "Prog HFP exponent overflow" }, \
+	{ 0x000D, "Prog HFP exponent underflow" }, \
+	{ 0x000E, "Prog HFP significance" }, \
+	{ 0x000F, "Prog HFP divide" }, \
+	{ 0x0010, "Prog Segment translation" }, \
+	{ 0x0011, "Prog Page translation" }, \
+	{ 0x0012, "Prog Translation specification" }, \
+	{ 0x0013, "Prog Special operation" }, \
+	{ 0x0015, "Prog Operand" }, \
+	{ 0x0016, "Prog Trace table" }, \
+	{ 0x0017, "Prog ASNtranslation specification" }, \
+	{ 0x001C, "Prog Spaceswitch event" }, \
+	{ 0x001D, "Prog HFP square root" }, \
+	{ 0x001F, "Prog PCtranslation specification" }, \
+	{ 0x0020, "Prog AFX translation" }, \
+	{ 0x0021, "Prog ASX translation" }, \
+	{ 0x0022, "Prog LX translation" }, \
+	{ 0x0023, "Prog EX translation" }, \
+	{ 0x0024, "Prog Primary authority" }, \
+	{ 0x0025, "Prog Secondary authority" }, \
+	{ 0x0026, "Prog LFXtranslation exception" }, \
+	{ 0x0027, "Prog LSXtranslation exception" }, \
+	{ 0x0028, "Prog ALET specification" }, \
+	{ 0x0029, "Prog ALEN translation" }, \
+	{ 0x002A, "Prog ALE sequence" }, \
+	{ 0x002B, "Prog ASTE validity" }, \
+	{ 0x002C, "Prog ASTE sequence" }, \
+	{ 0x002D, "Prog Extended authority" }, \
+	{ 0x002E, "Prog LSTE sequence" }, \
+	{ 0x002F, "Prog ASTE instance" }, \
+	{ 0x0030, "Prog Stack full" }, \
+	{ 0x0031, "Prog Stack empty" }, \
+	{ 0x0032, "Prog Stack specification" }, \
+	{ 0x0033, "Prog Stack type" }, \
+	{ 0x0034, "Prog Stack operation" }, \
+	{ 0x0039, "Prog Region first translation" }, \
+	{ 0x003A, "Prog Region second translation" }, \
+	{ 0x003B, "Prog Region third translation" }, \
+	{ 0x0040, "Prog Monitor event" }, \
+	{ 0x0080, "Prog PER event" }, \
+	{ 0x0119, "Prog Crypto operation" }
+
+#define exit_code_ipa0(ipa0, opcode, mnemonic) \
+	{ (ipa0 << 8 | opcode), #ipa0 " " mnemonic }
+#define exit_code(opcode, mnemonic) \
+	{ opcode, mnemonic }
+
+#define icpt_insn_codes \
+	exit_code_ipa0(0x01, 0x01, "PR"), \
+	exit_code_ipa0(0x01, 0x04, "PTFF"), \
+	exit_code_ipa0(0x01, 0x07, "SCKPF"), \
+	exit_code_ipa0(0xAA, 0x00, "RINEXT"), \
+	exit_code_ipa0(0xAA, 0x01, "RION"), \
+	exit_code_ipa0(0xAA, 0x02, "TRIC"), \
+	exit_code_ipa0(0xAA, 0x03, "RIOFF"), \
+	exit_code_ipa0(0xAA, 0x04, "RIEMIT"), \
+	exit_code_ipa0(0xB2, 0x02, "STIDP"), \
+	exit_code_ipa0(0xB2, 0x04, "SCK"), \
+	exit_code_ipa0(0xB2, 0x05, "STCK"), \
+	exit_code_ipa0(0xB2, 0x06, "SCKC"), \
+	exit_code_ipa0(0xB2, 0x07, "STCKC"), \
+	exit_code_ipa0(0xB2, 0x08, "SPT"), \
+	exit_code_ipa0(0xB2, 0x09, "STPT"), \
+	exit_code_ipa0(0xB2, 0x0d, "PTLB"), \
+	exit_code_ipa0(0xB2, 0x10, "SPX"), \
+	exit_code_ipa0(0xB2, 0x11, "STPX"), \
+	exit_code_ipa0(0xB2, 0x12, "STAP"), \
+	exit_code_ipa0(0xB2, 0x14, "SIE"), \
+	exit_code_ipa0(0xB2, 0x16, "SETR"), \
+	exit_code_ipa0(0xB2, 0x17, "STETR"), \
+	exit_code_ipa0(0xB2, 0x18, "PC"), \
+	exit_code_ipa0(0xB2, 0x20, "SERVC"), \
+	exit_code_ipa0(0xB2, 0x28, "PT"), \
+	exit_code_ipa0(0xB2, 0x29, "ISKE"), \
+	exit_code_ipa0(0xB2, 0x2a, "RRBE"), \
+	exit_code_ipa0(0xB2, 0x2b, "SSKE"), \
+	exit_code_ipa0(0xB2, 0x2c, "TB"), \
+	exit_code_ipa0(0xB2, 0x2e, "PGIN"), \
+	exit_code_ipa0(0xB2, 0x2f, "PGOUT"), \
+	exit_code_ipa0(0xB2, 0x30, "CSCH"), \
+	exit_code_ipa0(0xB2, 0x31, "HSCH"), \
+	exit_code_ipa0(0xB2, 0x32, "MSCH"), \
+	exit_code_ipa0(0xB2, 0x33, "SSCH"), \
+	exit_code_ipa0(0xB2, 0x34, "STSCH"), \
+	exit_code_ipa0(0xB2, 0x35, "TSCH"), \
+	exit_code_ipa0(0xB2, 0x36, "TPI"), \
+	exit_code_ipa0(0xB2, 0x37, "SAL"), \
+	exit_code_ipa0(0xB2, 0x38, "RSCH"), \
+	exit_code_ipa0(0xB2, 0x39, "STCRW"), \
+	exit_code_ipa0(0xB2, 0x3a, "STCPS"), \
+	exit_code_ipa0(0xB2, 0x3b, "RCHP"), \
+	exit_code_ipa0(0xB2, 0x3c, "SCHM"), \
+	exit_code_ipa0(0xB2, 0x40, "BAKR"), \
+	exit_code_ipa0(0xB2, 0x48, "PALB"), \
+	exit_code_ipa0(0xB2, 0x4c, "TAR"), \
+	exit_code_ipa0(0xB2, 0x50, "CSP"), \
+	exit_code_ipa0(0xB2, 0x54, "MVPG"), \
+	exit_code_ipa0(0xB2, 0x58, "BSG"), \
+	exit_code_ipa0(0xB2, 0x5a, "BSA"), \
+	exit_code_ipa0(0xB2, 0x5f, "CHSC"), \
+	exit_code_ipa0(0xB2, 0x74, "SIGA"), \
+	exit_code_ipa0(0xB2, 0x76, "XSCH"), \
+	exit_code_ipa0(0xB2, 0x78, "STCKE"), \
+	exit_code_ipa0(0xB2, 0x7c, "STCKF"), \
+	exit_code_ipa0(0xB2, 0x7d, "STSI"), \
+	exit_code_ipa0(0xB2, 0xb0, "STFLE"), \
+	exit_code_ipa0(0xB2, 0xb1, "STFL"), \
+	exit_code_ipa0(0xB2, 0xb2, "LPSWE"), \
+	exit_code_ipa0(0xB2, 0xf8, "TEND"), \
+	exit_code_ipa0(0xB2, 0xfc, "TABORT"), \
+	exit_code_ipa0(0xB9, 0x1e, "KMAC"), \
+	exit_code_ipa0(0xB9, 0x28, "PCKMO"), \
+	exit_code_ipa0(0xB9, 0x2a, "KMF"), \
+	exit_code_ipa0(0xB9, 0x2b, "KMO"), \
+	exit_code_ipa0(0xB9, 0x2d, "KMCTR"), \
+	exit_code_ipa0(0xB9, 0x2e, "KM"), \
+	exit_code_ipa0(0xB9, 0x2f, "KMC"), \
+	exit_code_ipa0(0xB9, 0x3e, "KIMD"), \
+	exit_code_ipa0(0xB9, 0x3f, "KLMD"), \
+	exit_code_ipa0(0xB9, 0x8a, "CSPG"), \
+	exit_code_ipa0(0xB9, 0x8d, "EPSW"), \
+	exit_code_ipa0(0xB9, 0x8e, "IDTE"), \
+	exit_code_ipa0(0xB9, 0x8f, "CRDTE"), \
+	exit_code_ipa0(0xB9, 0x9c, "EQBS"), \
+	exit_code_ipa0(0xB9, 0xa2, "PTF"), \
+	exit_code_ipa0(0xB9, 0xab, "ESSA"), \
+	exit_code_ipa0(0xB9, 0xae, "RRBM"), \
+	exit_code_ipa0(0xB9, 0xaf, "PFMF"), \
+	exit_code_ipa0(0xE3, 0x03, "LRAG"), \
+	exit_code_ipa0(0xE3, 0x13, "LRAY"), \
+	exit_code_ipa0(0xE3, 0x25, "NTSTG"), \
+	exit_code_ipa0(0xE5, 0x00, "LASP"), \
+	exit_code_ipa0(0xE5, 0x01, "TPROT"), \
+	exit_code_ipa0(0xE5, 0x60, "TBEGIN"), \
+	exit_code_ipa0(0xE5, 0x61, "TBEGINC"), \
+	exit_code_ipa0(0xEB, 0x25, "STCTG"), \
+	exit_code_ipa0(0xEB, 0x2f, "LCTLG"), \
+	exit_code_ipa0(0xEB, 0x60, "LRIC"), \
+	exit_code_ipa0(0xEB, 0x61, "STRIC"), \
+	exit_code_ipa0(0xEB, 0x62, "MRIC"), \
+	exit_code_ipa0(0xEB, 0x8a, "SQBS"), \
+	exit_code_ipa0(0xC8, 0x01, "ECTG"), \
+	exit_code(0x0a, "SVC"), \
+	exit_code(0x80, "SSM"), \
+	exit_code(0x82, "LPSW"), \
+	exit_code(0x83, "DIAG"), \
+	exit_code(0xae, "SIGP"), \
+	exit_code(0xac, "STNSM"), \
+	exit_code(0xad, "STOSM"), \
+	exit_code(0xb1, "LRA"), \
+	exit_code(0xb6, "STCTL"), \
+	exit_code(0xb7, "LCTL"), \
+	exit_code(0xee, "PLO")
+
+#define sie_intercept_code \
+	{ 0x00, "Host interruption" }, \
+	{ 0x04, "Instruction" }, \
+	{ 0x08, "Program interruption" }, \
+	{ 0x0c, "Instruction and program interruption" }, \
+	{ 0x10, "External request" }, \
+	{ 0x14, "External interruption" }, \
+	{ 0x18, "I/O request" }, \
+	{ 0x1c, "Wait state" }, \
+	{ 0x20, "Validity" }, \
+	{ 0x28, "Stop request" }, \
+	{ 0x2c, "Operation exception" }, \
+	{ 0x38, "Partial-execution" }, \
+	{ 0x3c, "I/O interruption" }, \
+	{ 0x40, "I/O instruction" }, \
+	{ 0x48, "Timing subset" }
+
+/*
+ * This is the simple interceptable instructions decoder.
+ *
+ * It will be used as userspace interface and it can be used in places
+ * that does not allow to use general decoder functions,
+ * such as trace events declarations.
+ *
+ * Some userspace tools may want to parse this code
+ * and would be confused by switch(), if() and other statements,
+ * but they can understand conditional operator.
+ */
+#define INSN_DECODE_IPA0(ipa0, insn, rshift, mask) \
+	(insn >> 56) == (ipa0) ? \
+	((ipa0 << 8) | ((insn >> rshift) & mask)) :
+
+#define INSN_DECODE(insn) (insn >> 56)
+
+/*
+ * The macro icpt_insn_decoder() takes an intercepted instruction
+ * and returns a key, which can be used to find a mnemonic name
+ * of the instruction in the icpt_insn_codes table.
+ */
+#define icpt_insn_decoder(insn) \
+	INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
+	INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
+	INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
+	INSN_DECODE_IPA0(0xb9, insn, 48, 0xff) \
+	INSN_DECODE_IPA0(0xe3, insn, 48, 0xff) \
+	INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
+	INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
+	INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
+	INSN_DECODE(insn)
+
+#endif /* _UAPI_ASM_S390_SIE_H */
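The decoder macro expands to a chain of conditional operators, so it works inside trace-event headers. A small userspace sketch of how the key it returns lines up with the icpt_insn_codes table (the lookup struct is made up; the include path is an assumption):

	#include <stdio.h>
	#include <asm/sie.h>

	static const struct { unsigned long long code; const char *name; }
	icpt_table[] = { icpt_insn_codes };

	int main(void)
	{
		/* STSI: ipa0 0xb2, opcode 0x7d, left-aligned like ipa/ipb */
		unsigned long long insn = 0xb27dULL << 48;
		unsigned long long key = icpt_insn_decoder(insn);
		size_t i;

		for (i = 0; i < sizeof(icpt_table) / sizeof(icpt_table[0]); i++)
			if (icpt_table[i].code == key)
				printf("intercepted: %s\n", icpt_table[i].name);
		return 0;
	}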
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 0c070c44cde2..afe1715a4eb7 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -90,16 +90,22 @@ int main(void)
 	DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
 	DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
 	DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
-	DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
+	DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num));
+	DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code));
+	DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid));
 	DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
-	DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
-	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
+	DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id));
+	DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
+	DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id));
+	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id));
+	DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code));
 	DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
 	DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
 	DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
 	DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
 	DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
 	DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
+	DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code));
 	DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
 	DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
 	DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
@@ -157,6 +163,8 @@ int main(void)
 #ifdef CONFIG_32BIT
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
+	DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
+	DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
 	DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 18e5af848f9a..70203265196f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -389,8 +389,8 @@ ENTRY(pgm_check_handler)
 	jz	pgm_kprobe
 	oi	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
-	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CODE
+	mvc	__THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
 0:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	l	%r1,BASED(.Ljump_table)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index c41f3f906720..f2e674c702e1 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -420,8 +420,8 @@ ENTRY(pgm_check_handler) | |||
420 | jz pgm_kprobe | 420 | jz pgm_kprobe |
421 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP | 421 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP |
422 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS | 422 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS |
423 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE | 423 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE |
424 | mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID | 424 | mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID |
425 | 0: REENABLE_IRQS | 425 | 0: REENABLE_IRQS |
426 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 426 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
427 | larl %r1,pgm_check_table | 427 | larl %r1,pgm_check_table |
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile index d3adb37e93a4..b3b553469650 100644 --- a/arch/s390/kvm/Makefile +++ b/arch/s390/kvm/Makefile | |||
@@ -11,5 +11,7 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch | |||
11 | 11 | ||
12 | ccflags-y := -Ivirt/kvm -Iarch/s390/kvm | 12 | ccflags-y := -Ivirt/kvm -Iarch/s390/kvm |
13 | 13 | ||
14 | kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o | 14 | kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o |
15 | kvm-objs += diag.o gaccess.o guestdbg.o | ||
16 | |||
15 | obj-$(CONFIG_KVM) += kvm.o | 17 | obj-$(CONFIG_KVM) += kvm.o |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 08dfc839a6cf..0161675878a2 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -23,7 +23,7 @@ | |||
23 | static int diag_release_pages(struct kvm_vcpu *vcpu) | 23 | static int diag_release_pages(struct kvm_vcpu *vcpu) |
24 | { | 24 | { |
25 | unsigned long start, end; | 25 | unsigned long start, end; |
26 | unsigned long prefix = vcpu->arch.sie_block->prefix; | 26 | unsigned long prefix = kvm_s390_get_prefix(vcpu); |
27 | 27 | ||
28 | start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; | 28 | start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; |
29 | end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; | 29 | end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; |
@@ -64,12 +64,12 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) | |||
64 | int rc; | 64 | int rc; |
65 | u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; | 65 | u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; |
66 | u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); | 66 | u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); |
67 | unsigned long hva_token = KVM_HVA_ERR_BAD; | ||
68 | 67 | ||
69 | if (vcpu->run->s.regs.gprs[rx] & 7) | 68 | if (vcpu->run->s.regs.gprs[rx] & 7) |
70 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 69 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
71 | if (copy_from_guest(vcpu, &parm, vcpu->run->s.regs.gprs[rx], sizeof(parm))) | 70 | rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); |
72 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 71 | if (rc) |
72 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
73 | if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) | 73 | if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) |
74 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 74 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
75 | 75 | ||
@@ -89,8 +89,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) | |||
89 | parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL) | 89 | parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL) |
90 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 90 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
91 | 91 | ||
92 | hva_token = gfn_to_hva(vcpu->kvm, gpa_to_gfn(parm.token_addr)); | 92 | if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr)) |
93 | if (kvm_is_error_hva(hva_token)) | ||
94 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 93 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
95 | 94 | ||
96 | vcpu->arch.pfault_token = parm.token_addr; | 95 | vcpu->arch.pfault_token = parm.token_addr; |
@@ -167,23 +166,17 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) | |||
167 | 166 | ||
168 | VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); | 167 | VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); |
169 | switch (subcode) { | 168 | switch (subcode) { |
170 | case 0: | ||
171 | case 1: | ||
172 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); | ||
173 | return -EOPNOTSUPP; | ||
174 | case 3: | 169 | case 3: |
175 | vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; | 170 | vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; |
176 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); | ||
177 | break; | 171 | break; |
178 | case 4: | 172 | case 4: |
179 | vcpu->run->s390_reset_flags = 0; | 173 | vcpu->run->s390_reset_flags = 0; |
180 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); | ||
181 | break; | 174 | break; |
182 | default: | 175 | default: |
183 | return -EOPNOTSUPP; | 176 | return -EOPNOTSUPP; |
184 | } | 177 | } |
185 | 178 | ||
186 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | 179 | kvm_s390_vcpu_stop(vcpu); |
187 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; | 180 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; |
188 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; | 181 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; |
189 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; | 182 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; |
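The diag.c conversion above shows the error-handling idiom that replaces copy_from_guest(): read_guest() may return zero, a negative host error, or a positive program-interruption code, and kvm_s390_inject_prog_cond() either injects the corresponding guest exception or passes the negative error through. A hedged sketch of the pattern (the handler name and operand layout are illustrative):

/* sketch: guest read followed by conditional program injection */
static int handle_example(struct kvm_vcpu *vcpu, unsigned long ga)
{
	u64 operand;
	int rc;

	rc = read_guest(vcpu, ga, &operand, sizeof(operand));
	if (rc)		/* rc < 0: host error, rc > 0: access exception */
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* ... emulate the instruction using operand ... */
	return 0;
}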
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c new file mode 100644 index 000000000000..4653ac6e182b --- /dev/null +++ b/arch/s390/kvm/gaccess.c | |||
@@ -0,0 +1,726 @@ | |||
1 | /* | ||
2 | * guest access functions | ||
3 | * | ||
4 | * Copyright IBM Corp. 2014 | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/vmalloc.h> | ||
9 | #include <linux/err.h> | ||
10 | #include <asm/pgtable.h> | ||
11 | #include "kvm-s390.h" | ||
12 | #include "gaccess.h" | ||
13 | |||
14 | union asce { | ||
15 | unsigned long val; | ||
16 | struct { | ||
17 | unsigned long origin : 52; /* Region- or Segment-Table Origin */ | ||
18 | unsigned long : 2; | ||
19 | unsigned long g : 1; /* Subspace Group Control */ | ||
20 | unsigned long p : 1; /* Private Space Control */ | ||
21 | unsigned long s : 1; /* Storage-Alteration-Event Control */ | ||
22 | unsigned long x : 1; /* Space-Switch-Event Control */ | ||
23 | unsigned long r : 1; /* Real-Space Control */ | ||
24 | unsigned long : 1; | ||
25 | unsigned long dt : 2; /* Designation-Type Control */ | ||
26 | unsigned long tl : 2; /* Region- or Segment-Table Length */ | ||
27 | }; | ||
28 | }; | ||
29 | |||
30 | enum { | ||
31 | ASCE_TYPE_SEGMENT = 0, | ||
32 | ASCE_TYPE_REGION3 = 1, | ||
33 | ASCE_TYPE_REGION2 = 2, | ||
34 | ASCE_TYPE_REGION1 = 3 | ||
35 | }; | ||
36 | |||
37 | union region1_table_entry { | ||
38 | unsigned long val; | ||
39 | struct { | ||
40 | unsigned long rto: 52;/* Region-Table Origin */ | ||
41 | unsigned long : 2; | ||
42 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
43 | unsigned long : 1; | ||
44 | unsigned long tf : 2; /* Region-Second-Table Offset */ | ||
45 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
46 | unsigned long : 1; | ||
47 | unsigned long tt : 2; /* Table-Type Bits */ | ||
48 | unsigned long tl : 2; /* Region-Second-Table Length */ | ||
49 | }; | ||
50 | }; | ||
51 | |||
52 | union region2_table_entry { | ||
53 | unsigned long val; | ||
54 | struct { | ||
55 | unsigned long rto: 52;/* Region-Table Origin */ | ||
56 | unsigned long : 2; | ||
57 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
58 | unsigned long : 1; | ||
59 | unsigned long tf : 2; /* Region-Third-Table Offset */ | ||
60 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
61 | unsigned long : 1; | ||
62 | unsigned long tt : 2; /* Table-Type Bits */ | ||
63 | unsigned long tl : 2; /* Region-Third-Table Length */ | ||
64 | }; | ||
65 | }; | ||
66 | |||
67 | struct region3_table_entry_fc0 { | ||
68 | unsigned long sto: 52;/* Segment-Table Origin */ | ||
69 | unsigned long : 1; | ||
70 | unsigned long fc : 1; /* Format-Control */ | ||
71 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
72 | unsigned long : 1; | ||
73 | unsigned long tf : 2; /* Segment-Table Offset */ | ||
74 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
75 | unsigned long cr : 1; /* Common-Region Bit */ | ||
76 | unsigned long tt : 2; /* Table-Type Bits */ | ||
77 | unsigned long tl : 2; /* Segment-Table Length */ | ||
78 | }; | ||
79 | |||
80 | struct region3_table_entry_fc1 { | ||
81 | unsigned long rfaa : 33; /* Region-Frame Absolute Address */ | ||
82 | unsigned long : 14; | ||
83 | unsigned long av : 1; /* ACCF-Validity Control */ | ||
84 | unsigned long acc: 4; /* Access-Control Bits */ | ||
85 | unsigned long f : 1; /* Fetch-Protection Bit */ | ||
86 | unsigned long fc : 1; /* Format-Control */ | ||
87 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
88 | unsigned long co : 1; /* Change-Recording Override */ | ||
89 | unsigned long : 2; | ||
90 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
91 | unsigned long cr : 1; /* Common-Region Bit */ | ||
92 | unsigned long tt : 2; /* Table-Type Bits */ | ||
93 | unsigned long : 2; | ||
94 | }; | ||
95 | |||
96 | union region3_table_entry { | ||
97 | unsigned long val; | ||
98 | struct region3_table_entry_fc0 fc0; | ||
99 | struct region3_table_entry_fc1 fc1; | ||
100 | struct { | ||
101 | unsigned long : 53; | ||
102 | unsigned long fc : 1; /* Format-Control */ | ||
103 | unsigned long : 4; | ||
104 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
105 | unsigned long cr : 1; /* Common-Region Bit */ | ||
106 | unsigned long tt : 2; /* Table-Type Bits */ | ||
107 | unsigned long : 2; | ||
108 | }; | ||
109 | }; | ||
110 | |||
111 | struct segment_entry_fc0 { | ||
112 | unsigned long pto: 53;/* Page-Table Origin */ | ||
113 | unsigned long fc : 1; /* Format-Control */ | ||
114 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
115 | unsigned long : 3; | ||
116 | unsigned long i : 1; /* Segment-Invalid Bit */ | ||
117 | unsigned long cs : 1; /* Common-Segment Bit */ | ||
118 | unsigned long tt : 2; /* Table-Type Bits */ | ||
119 | unsigned long : 2; | ||
120 | }; | ||
121 | |||
122 | struct segment_entry_fc1 { | ||
123 | unsigned long sfaa : 44; /* Segment-Frame Absolute Address */ | ||
124 | unsigned long : 3; | ||
125 | unsigned long av : 1; /* ACCF-Validity Control */ | ||
126 | unsigned long acc: 4; /* Access-Control Bits */ | ||
127 | unsigned long f : 1; /* Fetch-Protection Bit */ | ||
128 | unsigned long fc : 1; /* Format-Control */ | ||
129 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
130 | unsigned long co : 1; /* Change-Recording Override */ | ||
131 | unsigned long : 2; | ||
132 | unsigned long i : 1; /* Segment-Invalid Bit */ | ||
133 | unsigned long cs : 1; /* Common-Segment Bit */ | ||
134 | unsigned long tt : 2; /* Table-Type Bits */ | ||
135 | unsigned long : 2; | ||
136 | }; | ||
137 | |||
138 | union segment_table_entry { | ||
139 | unsigned long val; | ||
140 | struct segment_entry_fc0 fc0; | ||
141 | struct segment_entry_fc1 fc1; | ||
142 | struct { | ||
143 | unsigned long : 53; | ||
144 | unsigned long fc : 1; /* Format-Control */ | ||
145 | unsigned long : 4; | ||
146 | unsigned long i : 1; /* Segment-Invalid Bit */ | ||
147 | unsigned long cs : 1; /* Common-Segment Bit */ | ||
148 | unsigned long tt : 2; /* Table-Type Bits */ | ||
149 | unsigned long : 2; | ||
150 | }; | ||
151 | }; | ||
152 | |||
153 | enum { | ||
154 | TABLE_TYPE_SEGMENT = 0, | ||
155 | TABLE_TYPE_REGION3 = 1, | ||
156 | TABLE_TYPE_REGION2 = 2, | ||
157 | TABLE_TYPE_REGION1 = 3 | ||
158 | }; | ||
159 | |||
160 | union page_table_entry { | ||
161 | unsigned long val; | ||
162 | struct { | ||
163 | unsigned long pfra : 52; /* Page-Frame Real Address */ | ||
164 | unsigned long z : 1; /* Zero Bit */ | ||
165 | unsigned long i : 1; /* Page-Invalid Bit */ | ||
166 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
167 | unsigned long co : 1; /* Change-Recording Override */ | ||
168 | unsigned long : 8; | ||
169 | }; | ||
170 | }; | ||
171 | |||
172 | /* | ||
173 | * vaddress union to easily decode a virtual address into its | ||
174 | * region-first-index, region-second-index, etc. parts. | ||
175 | */ | ||
176 | union vaddress { | ||
177 | unsigned long addr; | ||
178 | struct { | ||
179 | unsigned long rfx : 11; | ||
180 | unsigned long rsx : 11; | ||
181 | unsigned long rtx : 11; | ||
182 | unsigned long sx : 11; | ||
183 | unsigned long px : 8; | ||
184 | unsigned long bx : 12; | ||
185 | }; | ||
186 | struct { | ||
187 | unsigned long rfx01 : 2; | ||
188 | unsigned long : 9; | ||
189 | unsigned long rsx01 : 2; | ||
190 | unsigned long : 9; | ||
191 | unsigned long rtx01 : 2; | ||
192 | unsigned long : 9; | ||
193 | unsigned long sx01 : 2; | ||
194 | unsigned long : 29; | ||
195 | }; | ||
196 | }; | ||
197 | |||
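Since s390 is big-endian and allocates bit-fields most-significant-bit first, rfx above maps to the top 11 bits of the address and bx to the low 12. The same decoding can be written portably with shifts and masks; a standalone sketch for a 64-bit build:

#include <stdio.h>

int main(void)
{
	unsigned long ga = 0x8000001000UL;	/* arbitrary guest address */

	printf("rfx=%lx rsx=%lx rtx=%lx sx=%lx px=%lx bx=%lx\n",
	       (ga >> 53) & 0x7ff,	/* region-first index  */
	       (ga >> 42) & 0x7ff,	/* region-second index */
	       (ga >> 31) & 0x7ff,	/* region-third index  */
	       (ga >> 20) & 0x7ff,	/* segment index       */
	       (ga >> 12) & 0xff,	/* page index          */
	       ga & 0xfff);		/* byte index          */
	return 0;
}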
198 | /* | ||
199 | * raddress union which will contain the result (real or absolute address) | ||
200 | * after a page table walk. The rfaa, sfaa and pfra members allow the | ||
201 | * value of a region, segment or page table entry to be assigned directly. | ||
202 | */ | ||
203 | union raddress { | ||
204 | unsigned long addr; | ||
205 | unsigned long rfaa : 33; /* Region-Frame Absolute Address */ | ||
206 | unsigned long sfaa : 44; /* Segment-Frame Absolute Address */ | ||
207 | unsigned long pfra : 52; /* Page-Frame Real Address */ | ||
208 | }; | ||
209 | |||
210 | static int ipte_lock_count; | ||
211 | static DEFINE_MUTEX(ipte_mutex); | ||
212 | |||
213 | int ipte_lock_held(struct kvm_vcpu *vcpu) | ||
214 | { | ||
215 | union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control; | ||
216 | |||
217 | if (vcpu->arch.sie_block->eca & 1) | ||
218 | return ic->kh != 0; | ||
219 | return ipte_lock_count != 0; | ||
220 | } | ||
221 | |||
222 | static void ipte_lock_simple(struct kvm_vcpu *vcpu) | ||
223 | { | ||
224 | union ipte_control old, new, *ic; | ||
225 | |||
226 | mutex_lock(&ipte_mutex); | ||
227 | ipte_lock_count++; | ||
228 | if (ipte_lock_count > 1) | ||
229 | goto out; | ||
230 | ic = &vcpu->kvm->arch.sca->ipte_control; | ||
231 | do { | ||
232 | old = ACCESS_ONCE(*ic); | ||
233 | while (old.k) { | ||
234 | cond_resched(); | ||
235 | old = ACCESS_ONCE(*ic); | ||
236 | } | ||
237 | new = old; | ||
238 | new.k = 1; | ||
239 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); | ||
240 | out: | ||
241 | mutex_unlock(&ipte_mutex); | ||
242 | } | ||
243 | |||
244 | static void ipte_unlock_simple(struct kvm_vcpu *vcpu) | ||
245 | { | ||
246 | union ipte_control old, new, *ic; | ||
247 | |||
248 | mutex_lock(&ipte_mutex); | ||
249 | ipte_lock_count--; | ||
250 | if (ipte_lock_count) | ||
251 | goto out; | ||
252 | ic = &vcpu->kvm->arch.sca->ipte_control; | ||
253 | do { | ||
254 | new = old = ACCESS_ONCE(*ic); | ||
255 | new.k = 0; | ||
256 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); | ||
257 | if (!ipte_lock_count) | ||
258 | wake_up(&vcpu->kvm->arch.ipte_wq); | ||
259 | out: | ||
260 | mutex_unlock(&ipte_mutex); | ||
261 | } | ||
262 | |||
263 | static void ipte_lock_siif(struct kvm_vcpu *vcpu) | ||
264 | { | ||
265 | union ipte_control old, new, *ic; | ||
266 | |||
267 | ic = &vcpu->kvm->arch.sca->ipte_control; | ||
268 | do { | ||
269 | old = ACCESS_ONCE(*ic); | ||
270 | while (old.kg) { | ||
271 | cond_resched(); | ||
272 | old = ACCESS_ONCE(*ic); | ||
273 | } | ||
274 | new = old; | ||
275 | new.k = 1; | ||
276 | new.kh++; | ||
277 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); | ||
278 | } | ||
279 | |||
280 | static void ipte_unlock_siif(struct kvm_vcpu *vcpu) | ||
281 | { | ||
282 | union ipte_control old, new, *ic; | ||
283 | |||
284 | ic = &vcpu->kvm->arch.sca->ipte_control; | ||
285 | do { | ||
286 | new = old = ACCESS_ONCE(*ic); | ||
287 | new.kh--; | ||
288 | if (!new.kh) | ||
289 | new.k = 0; | ||
290 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); | ||
291 | if (!new.kh) | ||
292 | wake_up(&vcpu->kvm->arch.ipte_wq); | ||
293 | } | ||
294 | |||
295 | void ipte_lock(struct kvm_vcpu *vcpu) | ||
296 | { | ||
297 | if (vcpu->arch.sie_block->eca & 1) | ||
298 | ipte_lock_siif(vcpu); | ||
299 | else | ||
300 | ipte_lock_simple(vcpu); | ||
301 | } | ||
302 | |||
303 | void ipte_unlock(struct kvm_vcpu *vcpu) | ||
304 | { | ||
305 | if (vcpu->arch.sie_block->eca & 1) | ||
306 | ipte_unlock_siif(vcpu); | ||
307 | else | ||
308 | ipte_unlock_simple(vcpu); | ||
309 | } | ||
310 | |||
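Taken together, the two lock flavors give callers a single ipte_lock()/ipte_unlock() pair that honors the IPTE interlock whether or not the SIIF facility is active; access_guest() further down uses it exactly this way. A minimal usage sketch (assuming a DAT-translated access without a real-space ASCE):

	ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
	/* ... copy data to/from the resolved guest pages ... */
	ipte_unlock(vcpu);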
311 | static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu) | ||
312 | { | ||
313 | switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { | ||
314 | case PSW_AS_PRIMARY: | ||
315 | return vcpu->arch.sie_block->gcr[1]; | ||
316 | case PSW_AS_SECONDARY: | ||
317 | return vcpu->arch.sie_block->gcr[7]; | ||
318 | case PSW_AS_HOME: | ||
319 | return vcpu->arch.sie_block->gcr[13]; | ||
320 | } | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) | ||
325 | { | ||
326 | return kvm_read_guest(kvm, gpa, val, sizeof(*val)); | ||
327 | } | ||
328 | |||
329 | /** | ||
330 | * guest_translate - translate a guest virtual into a guest absolute address | ||
331 | * @vcpu: virtual cpu | ||
332 | * @gva: guest virtual address | ||
333 | * @gpa: points to where guest physical (absolute) address should be stored | ||
334 | * @write: indicates if access is a write access | ||
335 | * | ||
336 | * Translate a guest virtual address into a guest absolute address by means | ||
337 | * of dynamic address translation as specified by the architecture. | ||
338 | * If the resulting absolute address is not available in the configuration | ||
339 | * an addressing exception is indicated and @gpa will not be changed. | ||
340 | * | ||
341 | * Returns: - zero on success; @gpa contains the resulting absolute address | ||
342 | * - a negative value if guest access failed due to e.g. broken | ||
343 | * guest mapping | ||
344 | * - a positive value if an access exception happened. In this case | ||
345 | * the returned value is the program interruption code as defined | ||
346 | * by the architecture | ||
347 | */ | ||
348 | static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, | ||
349 | unsigned long *gpa, int write) | ||
350 | { | ||
351 | union vaddress vaddr = {.addr = gva}; | ||
352 | union raddress raddr = {.addr = gva}; | ||
353 | union page_table_entry pte; | ||
354 | int dat_protection = 0; | ||
355 | union ctlreg0 ctlreg0; | ||
356 | unsigned long ptr; | ||
357 | int edat1, edat2; | ||
358 | union asce asce; | ||
359 | |||
360 | ctlreg0.val = vcpu->arch.sie_block->gcr[0]; | ||
361 | edat1 = ctlreg0.edat && test_vfacility(8); | ||
362 | edat2 = edat1 && test_vfacility(78); | ||
363 | asce.val = get_vcpu_asce(vcpu); | ||
364 | if (asce.r) | ||
365 | goto real_address; | ||
366 | ptr = asce.origin * 4096; | ||
367 | switch (asce.dt) { | ||
368 | case ASCE_TYPE_REGION1: | ||
369 | if (vaddr.rfx01 > asce.tl) | ||
370 | return PGM_REGION_FIRST_TRANS; | ||
371 | ptr += vaddr.rfx * 8; | ||
372 | break; | ||
373 | case ASCE_TYPE_REGION2: | ||
374 | if (vaddr.rfx) | ||
375 | return PGM_ASCE_TYPE; | ||
376 | if (vaddr.rsx01 > asce.tl) | ||
377 | return PGM_REGION_SECOND_TRANS; | ||
378 | ptr += vaddr.rsx * 8; | ||
379 | break; | ||
380 | case ASCE_TYPE_REGION3: | ||
381 | if (vaddr.rfx || vaddr.rsx) | ||
382 | return PGM_ASCE_TYPE; | ||
383 | if (vaddr.rtx01 > asce.tl) | ||
384 | return PGM_REGION_THIRD_TRANS; | ||
385 | ptr += vaddr.rtx * 8; | ||
386 | break; | ||
387 | case ASCE_TYPE_SEGMENT: | ||
388 | if (vaddr.rfx || vaddr.rsx || vaddr.rtx) | ||
389 | return PGM_ASCE_TYPE; | ||
390 | if (vaddr.sx01 > asce.tl) | ||
391 | return PGM_SEGMENT_TRANSLATION; | ||
392 | ptr += vaddr.sx * 8; | ||
393 | break; | ||
394 | } | ||
395 | switch (asce.dt) { | ||
396 | case ASCE_TYPE_REGION1: { | ||
397 | union region1_table_entry rfte; | ||
398 | |||
399 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
400 | return PGM_ADDRESSING; | ||
401 | if (deref_table(vcpu->kvm, ptr, &rfte.val)) | ||
402 | return -EFAULT; | ||
403 | if (rfte.i) | ||
404 | return PGM_REGION_FIRST_TRANS; | ||
405 | if (rfte.tt != TABLE_TYPE_REGION1) | ||
406 | return PGM_TRANSLATION_SPEC; | ||
407 | if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl) | ||
408 | return PGM_REGION_SECOND_TRANS; | ||
409 | if (edat1) | ||
410 | dat_protection |= rfte.p; | ||
411 | ptr = rfte.rto * 4096 + vaddr.rsx * 8; | ||
412 | } | ||
413 | /* fallthrough */ | ||
414 | case ASCE_TYPE_REGION2: { | ||
415 | union region2_table_entry rste; | ||
416 | |||
417 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
418 | return PGM_ADDRESSING; | ||
419 | if (deref_table(vcpu->kvm, ptr, &rste.val)) | ||
420 | return -EFAULT; | ||
421 | if (rste.i) | ||
422 | return PGM_REGION_SECOND_TRANS; | ||
423 | if (rste.tt != TABLE_TYPE_REGION2) | ||
424 | return PGM_TRANSLATION_SPEC; | ||
425 | if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl) | ||
426 | return PGM_REGION_THIRD_TRANS; | ||
427 | if (edat1) | ||
428 | dat_protection |= rste.p; | ||
429 | ptr = rste.rto * 4096 + vaddr.rtx * 8; | ||
430 | } | ||
431 | /* fallthrough */ | ||
432 | case ASCE_TYPE_REGION3: { | ||
433 | union region3_table_entry rtte; | ||
434 | |||
435 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
436 | return PGM_ADDRESSING; | ||
437 | if (deref_table(vcpu->kvm, ptr, &rtte.val)) | ||
438 | return -EFAULT; | ||
439 | if (rtte.i) | ||
440 | return PGM_REGION_THIRD_TRANS; | ||
441 | if (rtte.tt != TABLE_TYPE_REGION3) | ||
442 | return PGM_TRANSLATION_SPEC; | ||
443 | if (rtte.cr && asce.p && edat2) | ||
444 | return PGM_TRANSLATION_SPEC; | ||
445 | if (rtte.fc && edat2) { | ||
446 | dat_protection |= rtte.fc1.p; | ||
447 | raddr.rfaa = rtte.fc1.rfaa; | ||
448 | goto absolute_address; | ||
449 | } | ||
450 | if (vaddr.sx01 < rtte.fc0.tf) | ||
451 | return PGM_SEGMENT_TRANSLATION; | ||
452 | if (vaddr.sx01 > rtte.fc0.tl) | ||
453 | return PGM_SEGMENT_TRANSLATION; | ||
454 | if (edat1) | ||
455 | dat_protection |= rtte.fc0.p; | ||
456 | ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8; | ||
457 | } | ||
458 | /* fallthrough */ | ||
459 | case ASCE_TYPE_SEGMENT: { | ||
460 | union segment_table_entry ste; | ||
461 | |||
462 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
463 | return PGM_ADDRESSING; | ||
464 | if (deref_table(vcpu->kvm, ptr, &ste.val)) | ||
465 | return -EFAULT; | ||
466 | if (ste.i) | ||
467 | return PGM_SEGMENT_TRANSLATION; | ||
468 | if (ste.tt != TABLE_TYPE_SEGMENT) | ||
469 | return PGM_TRANSLATION_SPEC; | ||
470 | if (ste.cs && asce.p) | ||
471 | return PGM_TRANSLATION_SPEC; | ||
472 | if (ste.fc && edat1) { | ||
473 | dat_protection |= ste.fc1.p; | ||
474 | raddr.sfaa = ste.fc1.sfaa; | ||
475 | goto absolute_address; | ||
476 | } | ||
477 | dat_protection |= ste.fc0.p; | ||
478 | ptr = ste.fc0.pto * 2048 + vaddr.px * 8; | ||
479 | } | ||
480 | } | ||
481 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
482 | return PGM_ADDRESSING; | ||
483 | if (deref_table(vcpu->kvm, ptr, &pte.val)) | ||
484 | return -EFAULT; | ||
485 | if (pte.i) | ||
486 | return PGM_PAGE_TRANSLATION; | ||
487 | if (pte.z) | ||
488 | return PGM_TRANSLATION_SPEC; | ||
489 | if (pte.co && !edat1) | ||
490 | return PGM_TRANSLATION_SPEC; | ||
491 | dat_protection |= pte.p; | ||
492 | raddr.pfra = pte.pfra; | ||
493 | real_address: | ||
494 | raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr); | ||
495 | absolute_address: | ||
496 | if (write && dat_protection) | ||
497 | return PGM_PROTECTION; | ||
498 | if (kvm_is_error_gpa(vcpu->kvm, raddr.addr)) | ||
499 | return PGM_ADDRESSING; | ||
500 | *gpa = raddr.addr; | ||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | static inline int is_low_address(unsigned long ga) | ||
505 | { | ||
506 | /* Check for address ranges 0..511 and 4096..4607 */ | ||
507 | return (ga & ~0x11fful) == 0; | ||
508 | } | ||
509 | |||
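The single mask test works because the two protected ranges differ only in bit 0x1000: addresses 0..511 use bits 0-8, and 4096..4607 add bit 12, so clearing everything outside 0x11ff leaves zero exactly for those ranges. A standalone check:

#include <assert.h>

static int is_low_address(unsigned long ga)
{
	return (ga & ~0x11fful) == 0;
}

int main(void)
{
	assert(is_low_address(0) && is_low_address(511));
	assert(!is_low_address(512) && !is_low_address(4095));
	assert(is_low_address(4096) && is_low_address(4607));
	assert(!is_low_address(4608));
	return 0;
}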
510 | static int low_address_protection_enabled(struct kvm_vcpu *vcpu) | ||
511 | { | ||
512 | union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; | ||
513 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
514 | union asce asce; | ||
515 | |||
516 | if (!ctlreg0.lap) | ||
517 | return 0; | ||
518 | asce.val = get_vcpu_asce(vcpu); | ||
519 | if (psw_bits(*psw).t && asce.p) | ||
520 | return 0; | ||
521 | return 1; | ||
522 | } | ||
523 | |||
524 | struct trans_exc_code_bits { | ||
525 | unsigned long addr : 52; /* Translation-exception Address */ | ||
526 | unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ | ||
527 | unsigned long : 7; | ||
528 | unsigned long b61 : 1; | ||
529 | unsigned long as : 2; /* ASCE Identifier */ | ||
530 | }; | ||
531 | |||
532 | enum { | ||
533 | FSI_UNKNOWN = 0, /* Unknown whether fetch or store */ | ||
534 | FSI_STORE = 1, /* Exception was due to store operation */ | ||
535 | FSI_FETCH = 2 /* Exception was due to fetch operation */ | ||
536 | }; | ||
537 | |||
538 | static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, | ||
539 | unsigned long *pages, unsigned long nr_pages, | ||
540 | int write) | ||
541 | { | ||
542 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; | ||
543 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
544 | struct trans_exc_code_bits *tec_bits; | ||
545 | int lap_enabled, rc; | ||
546 | |||
547 | memset(pgm, 0, sizeof(*pgm)); | ||
548 | tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; | ||
549 | tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; | ||
550 | tec_bits->as = psw_bits(*psw).as; | ||
551 | lap_enabled = low_address_protection_enabled(vcpu); | ||
552 | while (nr_pages) { | ||
553 | ga = kvm_s390_logical_to_effective(vcpu, ga); | ||
554 | tec_bits->addr = ga >> PAGE_SHIFT; | ||
555 | if (write && lap_enabled && is_low_address(ga)) { | ||
556 | pgm->code = PGM_PROTECTION; | ||
557 | return pgm->code; | ||
558 | } | ||
559 | ga &= PAGE_MASK; | ||
560 | if (psw_bits(*psw).t) { | ||
561 | rc = guest_translate(vcpu, ga, pages, write); | ||
562 | if (rc < 0) | ||
563 | return rc; | ||
564 | if (rc == PGM_PROTECTION) | ||
565 | tec_bits->b61 = 1; | ||
566 | if (rc) | ||
567 | pgm->code = rc; | ||
568 | } else { | ||
569 | *pages = kvm_s390_real_to_abs(vcpu, ga); | ||
570 | if (kvm_is_error_gpa(vcpu->kvm, *pages)) | ||
571 | pgm->code = PGM_ADDRESSING; | ||
572 | } | ||
573 | if (pgm->code) | ||
574 | return pgm->code; | ||
575 | ga += PAGE_SIZE; | ||
576 | pages++; | ||
577 | nr_pages--; | ||
578 | } | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | ||
583 | unsigned long len, int write) | ||
584 | { | ||
585 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
586 | unsigned long _len, nr_pages, gpa, idx; | ||
587 | unsigned long pages_array[2]; | ||
588 | unsigned long *pages; | ||
589 | int need_ipte_lock; | ||
590 | union asce asce; | ||
591 | int rc; | ||
592 | |||
593 | if (!len) | ||
594 | return 0; | ||
595 | /* Access register mode is not supported yet. */ | ||
596 | if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) | ||
597 | return -EOPNOTSUPP; | ||
598 | nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; | ||
599 | pages = pages_array; | ||
600 | if (nr_pages > ARRAY_SIZE(pages_array)) | ||
601 | pages = vmalloc(nr_pages * sizeof(unsigned long)); | ||
602 | if (!pages) | ||
603 | return -ENOMEM; | ||
604 | asce.val = get_vcpu_asce(vcpu); | ||
605 | need_ipte_lock = psw_bits(*psw).t && !asce.r; | ||
606 | if (need_ipte_lock) | ||
607 | ipte_lock(vcpu); | ||
608 | rc = guest_page_range(vcpu, ga, pages, nr_pages, write); | ||
609 | for (idx = 0; idx < nr_pages && !rc; idx++) { | ||
610 | gpa = *(pages + idx) + (ga & ~PAGE_MASK); | ||
611 | _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); | ||
612 | if (write) | ||
613 | rc = kvm_write_guest(vcpu->kvm, gpa, data, _len); | ||
614 | else | ||
615 | rc = kvm_read_guest(vcpu->kvm, gpa, data, _len); | ||
616 | len -= _len; | ||
617 | ga += _len; | ||
618 | data += _len; | ||
619 | } | ||
620 | if (need_ipte_lock) | ||
621 | ipte_unlock(vcpu); | ||
622 | if (nr_pages > ARRAY_SIZE(pages_array)) | ||
623 | vfree(pages); | ||
624 | return rc; | ||
625 | } | ||
626 | |||
627 | int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | ||
628 | void *data, unsigned long len, int write) | ||
629 | { | ||
630 | unsigned long _len, gpa; | ||
631 | int rc = 0; | ||
632 | |||
633 | while (len && !rc) { | ||
634 | gpa = kvm_s390_real_to_abs(vcpu, gra); | ||
635 | _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); | ||
636 | if (write) | ||
637 | rc = write_guest_abs(vcpu, gpa, data, _len); | ||
638 | else | ||
639 | rc = read_guest_abs(vcpu, gpa, data, _len); | ||
640 | len -= _len; | ||
641 | gra += _len; | ||
642 | data += _len; | ||
643 | } | ||
644 | return rc; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * guest_translate_address - translate guest logical into guest absolute address | ||
649 | * | ||
650 | * Parameter semantics are the same as the ones from guest_translate. | ||
651 | * The memory contents at the guest address are not changed. | ||
652 | * | ||
653 | * Note: The IPTE lock is not taken during this function, so the caller | ||
654 | * has to take care of this. | ||
655 | */ | ||
656 | int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, | ||
657 | unsigned long *gpa, int write) | ||
658 | { | ||
659 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; | ||
660 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
661 | struct trans_exc_code_bits *tec; | ||
662 | union asce asce; | ||
663 | int rc; | ||
664 | |||
665 | /* Access register mode is not supported yet. */ | ||
666 | if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) | ||
667 | return -EOPNOTSUPP; | ||
668 | |||
669 | gva = kvm_s390_logical_to_effective(vcpu, gva); | ||
670 | memset(pgm, 0, sizeof(*pgm)); | ||
671 | tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; | ||
672 | tec->as = psw_bits(*psw).as; | ||
673 | tec->fsi = write ? FSI_STORE : FSI_FETCH; | ||
674 | tec->addr = gva >> PAGE_SHIFT; | ||
675 | if (is_low_address(gva) && low_address_protection_enabled(vcpu)) { | ||
676 | if (write) { | ||
677 | rc = pgm->code = PGM_PROTECTION; | ||
678 | return rc; | ||
679 | } | ||
680 | } | ||
681 | |||
682 | asce.val = get_vcpu_asce(vcpu); | ||
683 | if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ | ||
684 | rc = guest_translate(vcpu, gva, gpa, write); | ||
685 | if (rc > 0) { | ||
686 | if (rc == PGM_PROTECTION) | ||
687 | tec->b61 = 1; | ||
688 | pgm->code = rc; | ||
689 | } | ||
690 | } else { | ||
691 | rc = 0; | ||
692 | *gpa = kvm_s390_real_to_abs(vcpu, gva); | ||
693 | if (kvm_is_error_gpa(vcpu->kvm, *gpa)) | ||
694 | rc = pgm->code = PGM_ADDRESSING; | ||
695 | } | ||
696 | |||
697 | return rc; | ||
698 | } | ||
699 | |||
700 | /** | ||
701 | * kvm_s390_check_low_addr_protection - check for low-address protection | ||
702 | * @ga: Guest address | ||
703 | * | ||
704 | * Checks whether an address is subject to low-address protection and set | ||
705 | * up vcpu->arch.pgm accordingly if necessary. | ||
706 | * | ||
707 | * Return: 0 if no protection exception, or PGM_PROTECTION if protected. | ||
708 | */ | ||
709 | int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga) | ||
710 | { | ||
711 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; | ||
712 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
713 | struct trans_exc_code_bits *tec_bits; | ||
714 | |||
715 | if (!is_low_address(ga) || !low_address_protection_enabled(vcpu)) | ||
716 | return 0; | ||
717 | |||
718 | memset(pgm, 0, sizeof(*pgm)); | ||
719 | tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; | ||
720 | tec_bits->fsi = FSI_STORE; | ||
721 | tec_bits->as = psw_bits(*psw).as; | ||
722 | tec_bits->addr = ga >> PAGE_SHIFT; | ||
723 | pgm->code = PGM_PROTECTION; | ||
724 | |||
725 | return pgm->code; | ||
726 | } | ||
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 374a439ccc60..0149cf15058a 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * access guest memory | 2 | * access guest memory |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008, 2009 | 4 | * Copyright IBM Corp. 2008, 2014 |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License (version 2 only) | 7 | * it under the terms of the GNU General Public License (version 2 only) |
@@ -15,100 +15,321 @@ | |||
15 | 15 | ||
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/kvm_host.h> | 17 | #include <linux/kvm_host.h> |
18 | #include <asm/uaccess.h> | 18 | #include <linux/uaccess.h> |
19 | #include <linux/ptrace.h> | ||
19 | #include "kvm-s390.h" | 20 | #include "kvm-s390.h" |
20 | 21 | ||
21 | /* Convert real to absolute address by applying the prefix of the CPU */ | 22 | /** |
23 | * kvm_s390_real_to_abs - convert guest real address to guest absolute address | ||
24 | * @vcpu: guest virtual cpu | ||
25 | * @gra: guest real address | ||
26 | * | ||
27 | * Returns the guest absolute address that corresponds to the passed guest real | ||
28 | * address @gra of a virtual guest cpu by applying its prefix. | ||
29 | */ | ||
22 | static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, | 30 | static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, |
23 | unsigned long gaddr) | 31 | unsigned long gra) |
24 | { | 32 | { |
25 | unsigned long prefix = vcpu->arch.sie_block->prefix; | 33 | unsigned long prefix = kvm_s390_get_prefix(vcpu); |
26 | if (gaddr < 2 * PAGE_SIZE) | 34 | |
27 | gaddr += prefix; | 35 | if (gra < 2 * PAGE_SIZE) |
28 | else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE) | 36 | gra += prefix; |
29 | gaddr -= prefix; | 37 | else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE) |
30 | return gaddr; | 38 | gra -= prefix; |
39 | return gra; | ||
31 | } | 40 | } |
32 | 41 | ||
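The prefix swap is symmetric: the two pages at real address 0 and the two pages at the prefix trade places, and every other address maps to itself. A standalone illustration, assuming a prefix of 0x20000:

#include <assert.h>
#define PAGE_SIZE 4096UL

static unsigned long real_to_abs(unsigned long gra, unsigned long prefix)
{
	if (gra < 2 * PAGE_SIZE)
		gra += prefix;
	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
		gra -= prefix;
	return gra;
}

int main(void)
{
	unsigned long prefix = 0x20000;

	assert(real_to_abs(0x0, prefix) == 0x20000);	 /* low -> prefix */
	assert(real_to_abs(0x20000, prefix) == 0x0);	 /* prefix -> low */
	assert(real_to_abs(0x50000, prefix) == 0x50000); /* identity      */
	return 0;
}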
33 | static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu, | 42 | /** |
34 | void __user *gptr, | 43 | * kvm_s390_logical_to_effective - convert guest logical to effective address |
35 | int prefixing) | 44 | * @vcpu: guest virtual cpu |
45 | * @ga: guest logical address | ||
46 | * | ||
47 | * Convert a guest vcpu logical address to a guest vcpu effective address by | ||
48 | * applying the rules of the vcpu's addressing mode defined by PSW bits 31 | ||
49 | * and 32 (extended/basic addressing mode). | ||
50 | * | ||
51 | * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing | ||
52 | * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode) | ||
53 | * of @ga will be zeroed and the remaining bits will be returned. | ||
54 | */ | ||
55 | static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu, | ||
56 | unsigned long ga) | ||
36 | { | 57 | { |
37 | unsigned long gaddr = (unsigned long) gptr; | 58 | psw_t *psw = &vcpu->arch.sie_block->gpsw; |
38 | unsigned long uaddr; | 59 | |
39 | 60 | if (psw_bits(*psw).eaba == PSW_AMODE_64BIT) | |
40 | if (prefixing) | 61 | return ga; |
41 | gaddr = kvm_s390_real_to_abs(vcpu, gaddr); | 62 | if (psw_bits(*psw).eaba == PSW_AMODE_31BIT) |
42 | uaddr = gmap_fault(gaddr, vcpu->arch.gmap); | 63 | return ga & ((1UL << 31) - 1); |
43 | if (IS_ERR_VALUE(uaddr)) | 64 | return ga & ((1UL << 24) - 1); |
44 | uaddr = -EFAULT; | ||
45 | return (void __user *)uaddr; | ||
46 | } | 65 | } |
47 | 66 | ||
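As a concrete illustration of the masks above, an arbitrary 64-bit value reduces like this in each addressing mode (standalone sketch for a 64-bit build):

#include <assert.h>

int main(void)
{
	unsigned long ga = 0xdeadbeefcafef00dUL;

	/* 64-bit mode: the address is used as-is */
	/* 31-bit mode keeps the low 31 bits only */
	assert((ga & ((1UL << 31) - 1)) == 0x4afef00dUL);
	/* 24-bit mode keeps the low 24 bits only */
	assert((ga & ((1UL << 24) - 1)) == 0xfef00dUL);
	return 0;
}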
48 | #define get_guest(vcpu, x, gptr) \ | 67 | /* |
49 | ({ \ | 68 | * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions |
50 | __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ | 69 | * which shall only be used to access the lowcore of a vcpu. |
51 | int __mask = sizeof(__typeof__(*(gptr))) - 1; \ | 70 | * These functions should be used for e.g. interrupt handlers where no |
52 | int __ret; \ | 71 | * guest memory access protection facilities, like key or low address |
53 | \ | 72 | * protection, are applicable. |
54 | if (IS_ERR((void __force *)__uptr)) { \ | 73 | * At a later point guest vcpu lowcore access should happen via pinned |
55 | __ret = PTR_ERR((void __force *)__uptr); \ | 74 | * prefix pages, so that these pages can be accessed directly via the |
56 | } else { \ | 75 | * kernel mapping. All of these *_lc functions can be removed then. |
57 | BUG_ON((unsigned long)__uptr & __mask); \ | 76 | */ |
58 | __ret = get_user(x, __uptr); \ | ||
59 | } \ | ||
60 | __ret; \ | ||
61 | }) | ||
62 | 77 | ||
63 | #define put_guest(vcpu, x, gptr) \ | 78 | /** |
79 | * put_guest_lc - write a simple variable to a guest vcpu's lowcore | ||
80 | * @vcpu: virtual cpu | ||
81 | * @x: value to copy to guest | ||
82 | * @gra: vcpu's destination guest real address | ||
83 | * | ||
84 | * Copies a simple value from kernel space to a guest vcpu's lowcore. | ||
85 | * The size of the variable may be 1, 2, 4 or 8 bytes. The destination | ||
86 | * must be located in the vcpu's lowcore. Otherwise the result is undefined. | ||
87 | * | ||
88 | * Returns zero on success or -EFAULT on error. | ||
89 | * | ||
90 | * Note: an error indicates that either the kernel is out of memory or | ||
91 | * the guest memory mapping is broken. In any case the best solution | ||
92 | * would be to terminate the guest. | ||
93 | * It is wrong to inject a guest exception. | ||
94 | */ | ||
95 | #define put_guest_lc(vcpu, x, gra) \ | ||
64 | ({ \ | 96 | ({ \ |
65 | __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ | 97 | struct kvm_vcpu *__vcpu = (vcpu); \ |
66 | int __mask = sizeof(__typeof__(*(gptr))) - 1; \ | 98 | __typeof__(*(gra)) __x = (x); \ |
67 | int __ret; \ | 99 | unsigned long __gpa; \ |
68 | \ | 100 | \ |
69 | if (IS_ERR((void __force *)__uptr)) { \ | 101 | __gpa = (unsigned long)(gra); \ |
70 | __ret = PTR_ERR((void __force *)__uptr); \ | 102 | __gpa += kvm_s390_get_prefix(__vcpu); \ |
71 | } else { \ | 103 | kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x)); \ |
72 | BUG_ON((unsigned long)__uptr & __mask); \ | ||
73 | __ret = put_user(x, __uptr); \ | ||
74 | } \ | ||
75 | __ret; \ | ||
76 | }) | 104 | }) |
77 | 105 | ||
78 | static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to, | 106 | /** |
79 | unsigned long from, unsigned long len, | 107 | * write_guest_lc - copy data from kernel space to guest vcpu's lowcore |
80 | int to_guest, int prefixing) | 108 | * @vcpu: virtual cpu |
109 | * @gra: vcpu's destination guest real address | ||
110 | * @data: source address in kernel space | ||
111 | * @len: number of bytes to copy | ||
112 | * | ||
113 | * Copy data from kernel space to guest vcpu's lowcore. The entire range must | ||
114 | * be located within the vcpu's lowcore, otherwise the result is undefined. | ||
115 | * | ||
116 | * Returns zero on success or -EFAULT on error. | ||
117 | * | ||
118 | * Note: an error indicates that either the kernel is out of memory or | ||
119 | * the guest memory mapping is broken. In any case the best solution | ||
120 | * would be to terminate the guest. | ||
121 | * It is wrong to inject a guest exception. | ||
122 | */ | ||
123 | static inline __must_check | ||
124 | int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | ||
125 | unsigned long len) | ||
126 | { | ||
127 | unsigned long gpa = gra + kvm_s390_get_prefix(vcpu); | ||
128 | |||
129 | return kvm_write_guest(vcpu->kvm, gpa, data, len); | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * read_guest_lc - copy data from guest vcpu's lowcore to kernel space | ||
134 | * @vcpu: virtual cpu | ||
135 | * @gra: vcpu's source guest real address | ||
136 | * @data: destination address in kernel space | ||
137 | * @len: number of bytes to copy | ||
138 | * | ||
139 | * Copy data from guest vcpu's lowcore to kernel space. The entire range must | ||
140 | * be located within the vcpu's lowcore, otherwise the result is undefined. | ||
141 | * | ||
142 | * Returns zero on success or -EFAULT on error. | ||
143 | * | ||
144 | * Note: an error indicates that either the kernel is out of memory or | ||
145 | * the guest memory mapping is broken. In any case the best solution | ||
146 | * would be to terminate the guest. | ||
147 | * It is wrong to inject a guest exception. | ||
148 | */ | ||
149 | static inline __must_check | ||
150 | int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | ||
151 | unsigned long len) | ||
152 | { | ||
153 | unsigned long gpa = gra + kvm_s390_get_prefix(vcpu); | ||
154 | |||
155 | return kvm_read_guest(vcpu->kvm, gpa, data, len); | ||
156 | } | ||
157 | |||
158 | int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, | ||
159 | unsigned long *gpa, int write); | ||
160 | |||
161 | int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | ||
162 | unsigned long len, int write); | ||
163 | |||
164 | int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | ||
165 | void *data, unsigned long len, int write); | ||
166 | |||
167 | /** | ||
168 | * write_guest - copy data from kernel space to guest space | ||
169 | * @vcpu: virtual cpu | ||
170 | * @ga: guest address | ||
171 | * @data: source address in kernel space | ||
172 | * @len: number of bytes to copy | ||
173 | * | ||
174 | * Copy @len bytes from @data (kernel space) to @ga (guest address). | ||
175 | * In order to copy data to guest space the PSW of the vcpu is inspected: | ||
176 | * If DAT is off data will be copied to guest real or absolute memory. | ||
177 | * If DAT is on data will be copied to the address space as specified by | ||
178 | * the address space bits of the PSW: | ||
179 | * Primary, secondary or home space (access register mode is currently not | ||
180 | * implemented). | ||
181 | * The addressing mode of the PSW is also inspected, so that address wrap | ||
182 | * around is taken into account for 24-, 31- and 64-bit addressing mode, | ||
183 | * if the data to be copied crosses page boundaries in guest address space. | ||
184 | * In addition also low address and DAT protection are inspected before | ||
185 | * copying any data (key protection is currently not implemented). | ||
186 | * | ||
187 | * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu. | ||
188 | * In case of an access exception (e.g. protection exception) pgm will contain | ||
189 | * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()' | ||
190 | * will inject a correct exception into the guest. | ||
191 | * If no access exception happened, the contents of pgm are undefined when | ||
192 | * this function returns. | ||
193 | * | ||
194 | * Returns: - zero on success | ||
195 | * - a negative value if e.g. the guest mapping is broken or in | ||
196 | * case of out-of-memory. In this case the contents of pgm are | ||
197 | * undefined. Also parts of @data may have been copied to guest | ||
198 | * space. | ||
199 | * - a positive value if an access exception happened. In this case | ||
200 | * the returned value is the program interruption code and the | ||
201 | * contents of pgm may be used to inject an exception into the | ||
202 | * guest. No data has been copied to guest space. | ||
203 | * | ||
204 | * Note: in case an access exception is recognized no data has been copied to | ||
205 | * guest space (this is also true if the data to be copied would cross | ||
206 | * one or more page boundaries in guest space). | ||
207 | * Therefore this function may be used for nullifying and suppressing | ||
208 | * instruction emulation. | ||
209 | * It may also be used for terminating instructions, where it is undefined | ||
210 | * whether data has been changed in guest space in case of an exception. | ||
211 | */ | ||
212 | static inline __must_check | ||
213 | int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | ||
214 | unsigned long len) | ||
215 | { | ||
216 | return access_guest(vcpu, ga, data, len, 1); | ||
217 | } | ||
218 | |||
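Callers are expected to distinguish the three documented return classes. A hedged sketch of an emulation handler built on write_guest() (the handler name and operand are illustrative):

/* sketch: store emulation that suppresses on access exceptions */
static int handle_example_store(struct kvm_vcpu *vcpu,
				unsigned long ga, u32 operand)
{
	int rc = write_guest(vcpu, ga, &operand, sizeof(operand));

	if (rc < 0)
		return rc;	/* broken mapping / out of memory */
	if (rc > 0)		/* inject the access exception    */
		return kvm_s390_inject_prog_cond(vcpu, rc);
	return 0;		/* data stored, emulation done    */
}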
219 | /** | ||
220 | * read_guest - copy data from guest space to kernel space | ||
221 | * @vcpu: virtual cpu | ||
222 | * @ga: guest address | ||
223 | * @data: destination address in kernel space | ||
224 | * @len: number of bytes to copy | ||
225 | * | ||
226 | * Copy @len bytes from @ga (guest address) to @data (kernel space). | ||
227 | * | ||
228 | * The behaviour of read_guest is identical to write_guest, except that | ||
229 | * data will be copied from guest space to kernel space. | ||
230 | */ | ||
231 | static inline __must_check | ||
232 | int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | ||
233 | unsigned long len) | ||
234 | { | ||
235 | return access_guest(vcpu, ga, data, len, 0); | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * write_guest_abs - copy data from kernel space to guest space absolute | ||
240 | * @vcpu: virtual cpu | ||
241 | * @gpa: guest physical (absolute) address | ||
242 | * @data: source address in kernel space | ||
243 | * @len: number of bytes to copy | ||
244 | * | ||
245 | * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address). | ||
246 | * It is up to the caller to ensure that the entire guest memory range is | ||
247 | * valid memory before calling this function. | ||
248 | * Guest low address and key protection are not checked. | ||
249 | * | ||
250 | * Returns zero on success or -EFAULT on error. | ||
251 | * | ||
252 | * If an error occurs data may have been copied partially to guest memory. | ||
253 | */ | ||
254 | static inline __must_check | ||
255 | int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, | ||
256 | unsigned long len) | ||
257 | { | ||
258 | return kvm_write_guest(vcpu->kvm, gpa, data, len); | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * read_guest_abs - copy data from guest space absolute to kernel space | ||
263 | * @vcpu: virtual cpu | ||
264 | * @gpa: guest physical (absolute) address | ||
265 | * @data: destination address in kernel space | ||
266 | * @len: number of bytes to copy | ||
267 | * | ||
268 | * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space). | ||
269 | * It is up to the caller to ensure that the entire guest memory range is | ||
270 | * valid memory before calling this function. | ||
271 | * Guest key protection is not checked. | ||
272 | * | ||
273 | * Returns zero on success or -EFAULT on error. | ||
274 | * | ||
275 | * If an error occurs data may have been copied partially to kernel space. | ||
276 | */ | ||
277 | static inline __must_check | ||
278 | int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, | ||
279 | unsigned long len) | ||
280 | { | ||
281 | return kvm_read_guest(vcpu->kvm, gpa, data, len); | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * write_guest_real - copy data from kernel space to guest space real | ||
286 | * @vcpu: virtual cpu | ||
287 | * @gra: guest real address | ||
288 | * @data: source address in kernel space | ||
289 | * @len: number of bytes to copy | ||
290 | * | ||
291 | * Copy @len bytes from @data (kernel space) to @gra (guest real address). | ||
292 | * It is up to the caller to ensure that the entire guest memory range is | ||
293 | * valid memory before calling this function. | ||
294 | * Guest low address and key protection are not checked. | ||
295 | * | ||
296 | * Returns zero on success or -EFAULT on error. | ||
297 | * | ||
298 | * If an error occurs data may have been copied partially to guest memory. | ||
299 | */ | ||
300 | static inline __must_check | ||
301 | int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | ||
302 | unsigned long len) | ||
303 | { | ||
304 | return access_guest_real(vcpu, gra, data, len, 1); | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * read_guest_real - copy data from guest space real to kernel space | ||
309 | * @vcpu: virtual cpu | ||
310 | * @gra: guest real address | ||
311 | * @data: destination address in kernel space | ||
312 | * @len: number of bytes to copy | ||
313 | * | ||
314 | * Copy @len bytes from @gra (guest real address) to @data (kernel space). | ||
315 | * It is up to the caller to ensure that the entire guest memory range is | ||
316 | * valid memory before calling this function. | ||
317 | * Guest key protection is not checked. | ||
318 | * | ||
319 | * Returns zero on success or -EFAULT on error. | ||
320 | * | ||
321 | * If an error occurs data may have been copied partially to kernel space. | ||
322 | */ | ||
323 | static inline __must_check | ||
324 | int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | ||
325 | unsigned long len) | ||
81 | { | 326 | { |
82 | unsigned long _len, rc; | 327 | return access_guest_real(vcpu, gra, data, len, 0); |
83 | void __user *uptr; | ||
84 | |||
85 | while (len) { | ||
86 | uptr = to_guest ? (void __user *)to : (void __user *)from; | ||
87 | uptr = __gptr_to_uptr(vcpu, uptr, prefixing); | ||
88 | if (IS_ERR((void __force *)uptr)) | ||
89 | return -EFAULT; | ||
90 | _len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1)); | ||
91 | _len = min(_len, len); | ||
92 | if (to_guest) | ||
93 | rc = copy_to_user((void __user *) uptr, (void *)from, _len); | ||
94 | else | ||
95 | rc = copy_from_user((void *)to, (void __user *)uptr, _len); | ||
96 | if (rc) | ||
97 | return -EFAULT; | ||
98 | len -= _len; | ||
99 | from += _len; | ||
100 | to += _len; | ||
101 | } | ||
102 | return 0; | ||
103 | } | 328 | } |
104 | 329 | ||
105 | #define copy_to_guest(vcpu, to, from, size) \ | 330 | void ipte_lock(struct kvm_vcpu *vcpu); |
106 | __copy_guest(vcpu, to, (unsigned long)from, size, 1, 1) | 331 | void ipte_unlock(struct kvm_vcpu *vcpu); |
107 | #define copy_from_guest(vcpu, to, from, size) \ | 332 | int ipte_lock_held(struct kvm_vcpu *vcpu); |
108 | __copy_guest(vcpu, (unsigned long)to, from, size, 0, 1) | 333 | int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga); |
109 | #define copy_to_guest_absolute(vcpu, to, from, size) \ | ||
110 | __copy_guest(vcpu, to, (unsigned long)from, size, 1, 0) | ||
111 | #define copy_from_guest_absolute(vcpu, to, from, size) \ | ||
112 | __copy_guest(vcpu, (unsigned long)to, from, size, 0, 0) | ||
113 | 334 | ||
114 | #endif /* __KVM_S390_GACCESS_H */ | 335 | #endif /* __KVM_S390_GACCESS_H */ |
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c new file mode 100644 index 000000000000..3e8d4092ce30 --- /dev/null +++ b/arch/s390/kvm/guestdbg.c | |||
@@ -0,0 +1,482 @@ | |||
1 | /* | ||
2 | * kvm guest debug support | ||
3 | * | ||
4 | * Copyright IBM Corp. 2014 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com> | ||
11 | */ | ||
12 | #include <linux/kvm_host.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include "kvm-s390.h" | ||
15 | #include "gaccess.h" | ||
16 | |||
17 | /* | ||
18 | * Extends the address range given by *start and *stop to include the address | ||
19 | * range starting with estart and the length len. Takes care of overflowing | ||
20 | * intervals and tries to minimize the overall interval size. | ||
21 | */ | ||
22 | static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len) | ||
23 | { | ||
24 | u64 estop; | ||
25 | |||
26 | if (len > 0) | ||
27 | len--; | ||
28 | else | ||
29 | len = 0; | ||
30 | |||
31 | estop = estart + len; | ||
32 | |||
33 | /* 0-0 range represents "not set" */ | ||
34 | if ((*start == 0) && (*stop == 0)) { | ||
35 | *start = estart; | ||
36 | *stop = estop; | ||
37 | } else if (*start <= *stop) { | ||
38 | /* increase the existing range */ | ||
39 | if (estart < *start) | ||
40 | *start = estart; | ||
41 | if (estop > *stop) | ||
42 | *stop = estop; | ||
43 | } else { | ||
44 | /* "overflowing" interval, whereby *stop > *start */ | ||
45 | if (estart <= *stop) { | ||
46 | if (estop > *stop) | ||
47 | *stop = estop; | ||
48 | } else if (estop > *start) { | ||
49 | if (estart < *start) | ||
50 | *start = estart; | ||
51 | } | ||
52 | /* minimize the range */ | ||
53 | else if ((estop - *stop) < (*start - estart)) | ||
54 | *stop = estop; | ||
55 | else | ||
56 | *start = estart; | ||
57 | } | ||
58 | } | ||
59 | |||
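To see the "minimize the range" branch at work on a wrapped interval, here is a standalone sketch (the u64 typedef is added so it compiles outside the kernel; the function body is the one above):

#include <assert.h>
typedef unsigned long long u64;
/* extend_address_range() as defined above goes here */

int main(void)
{
	u64 start = 0xf000, stop = 0x1000;	/* wrapped: stop < start */

	/* 0x2000..0x20ff lies outside the range; it is closer to the
	 * wrapped end, so stop grows rather than start shrinking */
	extend_address_range(&start, &stop, 0x2000, 0x100);
	assert(start == 0xf000 && stop == 0x20ff);
	return 0;
}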
60 | #define MAX_INST_SIZE 6 | ||
61 | |||
62 | static void enable_all_hw_bp(struct kvm_vcpu *vcpu) | ||
63 | { | ||
64 | unsigned long start, len; | ||
65 | u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; | ||
66 | u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; | ||
67 | u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; | ||
68 | int i; | ||
69 | |||
70 | if (vcpu->arch.guestdbg.nr_hw_bp <= 0 || | ||
71 | vcpu->arch.guestdbg.hw_bp_info == NULL) | ||
72 | return; | ||
73 | |||
74 | /* | ||
75 | * If the guest is not interested in branching events, we can safely | ||
76 | * limit them to the PER address range. | ||
77 | */ | ||
78 | if (!(*cr9 & PER_EVENT_BRANCH)) | ||
79 | *cr9 |= PER_CONTROL_BRANCH_ADDRESS; | ||
80 | *cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH; | ||
81 | |||
82 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { | ||
83 | start = vcpu->arch.guestdbg.hw_bp_info[i].addr; | ||
84 | len = vcpu->arch.guestdbg.hw_bp_info[i].len; | ||
85 | |||
86 | /* | ||
87 | * The instruction in front of the desired bp has to | ||
88 | * report instruction-fetching events | ||
89 | */ | ||
90 | if (start < MAX_INST_SIZE) { | ||
91 | len += start; | ||
92 | start = 0; | ||
93 | } else { | ||
94 | start -= MAX_INST_SIZE; | ||
95 | len += MAX_INST_SIZE; | ||
96 | } | ||
97 | |||
98 | extend_address_range(cr10, cr11, start, len); | ||
99 | } | ||
100 | } | ||
101 | |||
102 | static void enable_all_hw_wp(struct kvm_vcpu *vcpu) | ||
103 | { | ||
104 | unsigned long start, len; | ||
105 | u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; | ||
106 | u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; | ||
107 | u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; | ||
108 | int i; | ||
109 | |||
110 | if (vcpu->arch.guestdbg.nr_hw_wp <= 0 || | ||
111 | vcpu->arch.guestdbg.hw_wp_info == NULL) | ||
112 | return; | ||
113 | |||
114 | /* if host uses storage alteration for special address | ||
115 | * spaces, enable all events and give all to the guest */ | ||
116 | if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) { | ||
117 | *cr9 &= ~PER_CONTROL_ALTERATION; | ||
118 | *cr10 = 0; | ||
119 | *cr11 = PSW_ADDR_INSN; | ||
120 | } else { | ||
121 | *cr9 &= ~PER_CONTROL_ALTERATION; | ||
122 | *cr9 |= PER_EVENT_STORE; | ||
123 | |||
124 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { | ||
125 | start = vcpu->arch.guestdbg.hw_wp_info[i].addr; | ||
126 | len = vcpu->arch.guestdbg.hw_wp_info[i].len; | ||
127 | |||
128 | extend_address_range(cr10, cr11, start, len); | ||
129 | } | ||
130 | } | ||
131 | } | ||
132 | |||
133 | void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu) | ||
134 | { | ||
135 | vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0]; | ||
136 | vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9]; | ||
137 | vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10]; | ||
138 | vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11]; | ||
139 | } | ||
140 | |||
141 | void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu) | ||
142 | { | ||
143 | vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0; | ||
144 | vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9; | ||
145 | vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10; | ||
146 | vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11; | ||
147 | } | ||
148 | |||
149 | void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu) | ||
150 | { | ||
151 | /* | ||
152 | * TODO: only set these registers up if the guest PSW has PER enabled; | ||
153 | * otherwise leave them 0. This would reduce the amount of reported events. | ||
154 | * Need to intercept all psw changes! | ||
155 | */ | ||
156 | |||
157 | if (guestdbg_sstep_enabled(vcpu)) { | ||
158 | /* disable timer (clock-comparator) interrupts */ | ||
159 | vcpu->arch.sie_block->gcr[0] &= ~0x800ul; | ||
160 | vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; | ||
161 | vcpu->arch.sie_block->gcr[10] = 0; | ||
162 | vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN; | ||
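| /* IFETCH events over the full address range report every instruction, i.e. single-stepping */ | ||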
163 | } | ||
164 | |||
165 | if (guestdbg_hw_bp_enabled(vcpu)) { | ||
166 | enable_all_hw_bp(vcpu); | ||
167 | enable_all_hw_wp(vcpu); | ||
168 | } | ||
169 | |||
170 | /* TODO: Instruction-fetching-nullification not allowed for now */ | ||
171 | if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION) | ||
172 | vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION; | ||
173 | } | ||
174 | |||
175 | #define MAX_WP_SIZE 100 | ||
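| /* cap on the bytes snapshotted per watchpoint; presumably an arbitrary sanity limit */ | ||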
176 | |||
177 | static int __import_wp_info(struct kvm_vcpu *vcpu, | ||
178 | struct kvm_hw_breakpoint *bp_data, | ||
179 | struct kvm_hw_wp_info_arch *wp_info) | ||
180 | { | ||
181 | int ret = 0; | ||
182 | wp_info->len = bp_data->len; | ||
183 | wp_info->addr = bp_data->addr; | ||
184 | wp_info->phys_addr = bp_data->phys_addr; | ||
185 | wp_info->old_data = NULL; | ||
186 | |||
187 | if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE) | ||
188 | return -EINVAL; | ||
189 | |||
190 | wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL); | ||
191 | if (!wp_info->old_data) | ||
192 | return -ENOMEM; | ||
193 | /* try to back up the original value */ | ||
194 | ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data, | ||
195 | wp_info->len); | ||
196 | if (ret) { | ||
197 | kfree(wp_info->old_data); | ||
198 | wp_info->old_data = NULL; | ||
199 | } | ||
200 | |||
201 | return ret; | ||
202 | } | ||
203 | |||
204 | #define MAX_BP_COUNT 50 | ||
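| /* sanity limit on the number of user-supplied breakpoints/watchpoints */ | ||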
205 | |||
206 | int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu, | ||
207 | struct kvm_guest_debug *dbg) | ||
208 | { | ||
209 | int ret = 0, nr_wp = 0, nr_bp = 0, i, size; | ||
210 | struct kvm_hw_breakpoint *bp_data = NULL; | ||
211 | struct kvm_hw_wp_info_arch *wp_info = NULL; | ||
212 | struct kvm_hw_bp_info_arch *bp_info = NULL; | ||
213 | |||
214 | if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp) | ||
215 | return 0; | ||
216 | else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT) | ||
217 | return -EINVAL; | ||
218 | |||
219 | size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint); | ||
220 | bp_data = kmalloc(size, GFP_KERNEL); | ||
221 | if (!bp_data) { | ||
222 | ret = -ENOMEM; | ||
223 | goto error; | ||
224 | } | ||
225 | |||
226 | if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) { | ||
227 | ret = -EFAULT; | ||
228 | goto error; | ||
229 | } | ||
230 | |||
231 | for (i = 0; i < dbg->arch.nr_hw_bp; i++) { | ||
232 | switch (bp_data[i].type) { | ||
233 | case KVM_HW_WP_WRITE: | ||
234 | nr_wp++; | ||
235 | break; | ||
236 | case KVM_HW_BP: | ||
237 | nr_bp++; | ||
238 | break; | ||
239 | default: | ||
240 | break; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | size = nr_wp * sizeof(struct kvm_hw_wp_info_arch); | ||
245 | if (size > 0) { | ||
246 | wp_info = kmalloc(size, GFP_KERNEL); | ||
247 | if (!wp_info) { | ||
248 | ret = -ENOMEM; | ||
249 | goto error; | ||
250 | } | ||
251 | } | ||
252 | size = nr_bp * sizeof(struct kvm_hw_bp_info_arch); | ||
253 | if (size > 0) { | ||
254 | bp_info = kmalloc(size, GFP_KERNEL); | ||
255 | if (!bp_info) { | ||
256 | ret = -ENOMEM; | ||
257 | goto error; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) { | ||
262 | switch (bp_data[i].type) { | ||
263 | case KVM_HW_WP_WRITE: | ||
264 | ret = __import_wp_info(vcpu, &bp_data[i], | ||
265 | &wp_info[nr_wp]); | ||
266 | if (ret) | ||
267 | goto error; | ||
268 | nr_wp++; | ||
269 | break; | ||
270 | case KVM_HW_BP: | ||
271 | bp_info[nr_bp].len = bp_data[i].len; | ||
272 | bp_info[nr_bp].addr = bp_data[i].addr; | ||
273 | nr_bp++; | ||
274 | break; | ||
275 | } | ||
276 | } | ||
277 | |||
278 | vcpu->arch.guestdbg.nr_hw_bp = nr_bp; | ||
279 | vcpu->arch.guestdbg.hw_bp_info = bp_info; | ||
280 | vcpu->arch.guestdbg.nr_hw_wp = nr_wp; | ||
281 | vcpu->arch.guestdbg.hw_wp_info = wp_info; | ||
282 | return 0; | ||
283 | error: | ||
284 | kfree(bp_data); | ||
285 | kfree(wp_info); | ||
286 | kfree(bp_info); | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu) | ||
291 | { | ||
292 | int i; | ||
293 | struct kvm_hw_wp_info_arch *hw_wp_info = NULL; | ||
294 | |||
295 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { | ||
296 | hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i]; | ||
297 | kfree(hw_wp_info->old_data); | ||
298 | hw_wp_info->old_data = NULL; | ||
299 | } | ||
300 | kfree(vcpu->arch.guestdbg.hw_wp_info); | ||
301 | vcpu->arch.guestdbg.hw_wp_info = NULL; | ||
302 | |||
303 | kfree(vcpu->arch.guestdbg.hw_bp_info); | ||
304 | vcpu->arch.guestdbg.hw_bp_info = NULL; | ||
305 | |||
306 | vcpu->arch.guestdbg.nr_hw_wp = 0; | ||
307 | vcpu->arch.guestdbg.nr_hw_bp = 0; | ||
308 | } | ||
309 | |||
310 | static inline int in_addr_range(u64 addr, u64 a, u64 b) | ||
311 | { | ||
312 | if (a <= b) | ||
313 | return (addr >= a) && (addr <= b); | ||
314 | else | ||
315 | /* "overflowing" interval */ | ||
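| /* e.g. a = 0xf000, b = 0x0fff: both 0xff00 and 0x0100 are in range */ | ||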
316 | return (addr >= a) || (addr <= b); | ||
317 | } | ||
318 | |||
319 | #define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1) | ||
320 | |||
321 | static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu, | ||
322 | unsigned long addr) | ||
323 | { | ||
324 | struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info; | ||
325 | int i; | ||
326 | |||
327 | if (vcpu->arch.guestdbg.nr_hw_bp == 0) | ||
328 | return NULL; | ||
329 | |||
330 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { | ||
331 | /* addr is either the start of a bp or within its range */ | ||
332 | if (addr == bp_info->addr) | ||
333 | goto found; | ||
334 | if (bp_info->len > 0 && | ||
335 | in_addr_range(addr, bp_info->addr, end_of_range(bp_info))) | ||
336 | goto found; | ||
337 | |||
338 | bp_info++; | ||
339 | } | ||
340 | |||
341 | return NULL; | ||
342 | found: | ||
343 | return bp_info; | ||
344 | } | ||
345 | |||
346 | static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu) | ||
347 | { | ||
348 | int i; | ||
349 | struct kvm_hw_wp_info_arch *wp_info = NULL; | ||
350 | void *temp = NULL; | ||
351 | |||
352 | if (vcpu->arch.guestdbg.nr_hw_wp == 0) | ||
353 | return NULL; | ||
354 | |||
355 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { | ||
356 | wp_info = &vcpu->arch.guestdbg.hw_wp_info[i]; | ||
357 | if (!wp_info || !wp_info->old_data || wp_info->len <= 0) | ||
358 | continue; | ||
359 | |||
360 | temp = kmalloc(wp_info->len, GFP_KERNEL); | ||
361 | if (!temp) | ||
362 | continue; | ||
363 | |||
364 | /* refetch the wp data and compare it to the old value */ | ||
365 | if (!read_guest(vcpu, wp_info->phys_addr, temp, | ||
366 | wp_info->len)) { | ||
367 | if (memcmp(temp, wp_info->old_data, wp_info->len)) { | ||
368 | kfree(temp); | ||
369 | return wp_info; | ||
370 | } | ||
371 | } | ||
372 | kfree(temp); | ||
373 | temp = NULL; | ||
374 | } | ||
375 | |||
376 | return NULL; | ||
377 | } | ||
378 | |||
379 | void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu) | ||
380 | { | ||
381 | vcpu->run->exit_reason = KVM_EXIT_DEBUG; | ||
382 | vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; | ||
383 | } | ||
384 | |||
385 | #define per_bp_event(code) \ | ||
386 | (code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH)) | ||
387 | #define per_write_wp_event(code) \ | ||
388 | (code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL)) | ||
389 | |||
390 | static int debug_exit_required(struct kvm_vcpu *vcpu) | ||
391 | { | ||
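| /* shift the one-byte PER code up so it lines up with the PER_EVENT_* mask bits */ | ||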
392 | u32 perc = (vcpu->arch.sie_block->perc << 24); | ||
393 | struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch; | ||
394 | struct kvm_hw_wp_info_arch *wp_info = NULL; | ||
395 | struct kvm_hw_bp_info_arch *bp_info = NULL; | ||
396 | unsigned long addr = vcpu->arch.sie_block->gpsw.addr; | ||
397 | unsigned long peraddr = vcpu->arch.sie_block->peraddr; | ||
398 | |||
399 | if (guestdbg_hw_bp_enabled(vcpu)) { | ||
400 | if (per_write_wp_event(perc) && | ||
401 | vcpu->arch.guestdbg.nr_hw_wp > 0) { | ||
402 | wp_info = any_wp_changed(vcpu); | ||
403 | if (wp_info) { | ||
404 | debug_exit->addr = wp_info->addr; | ||
405 | debug_exit->type = KVM_HW_WP_WRITE; | ||
406 | goto exit_required; | ||
407 | } | ||
408 | } | ||
409 | if (per_bp_event(perc) && | ||
410 | vcpu->arch.guestdbg.nr_hw_bp > 0) { | ||
411 | bp_info = find_hw_bp(vcpu, addr); | ||
412 | /* remove duplicate events if PC==PER address */ | ||
413 | if (bp_info && (addr != peraddr)) { | ||
414 | debug_exit->addr = addr; | ||
415 | debug_exit->type = KVM_HW_BP; | ||
416 | vcpu->arch.guestdbg.last_bp = addr; | ||
417 | goto exit_required; | ||
418 | } | ||
419 | /* breakpoint missed: the PSW already advanced, so check the PER address too */ | ||
420 | bp_info = find_hw_bp(vcpu, peraddr); | ||
421 | if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) { | ||
422 | debug_exit->addr = peraddr; | ||
423 | debug_exit->type = KVM_HW_BP; | ||
424 | goto exit_required; | ||
425 | } | ||
426 | } | ||
427 | } | ||
428 | if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) { | ||
429 | debug_exit->addr = addr; | ||
430 | debug_exit->type = KVM_SINGLESTEP; | ||
431 | goto exit_required; | ||
432 | } | ||
433 | |||
434 | return 0; | ||
435 | exit_required: | ||
436 | return 1; | ||
437 | } | ||
438 | |||
439 | #define guest_per_enabled(vcpu) \ | ||
440 | (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) | ||
441 | |||
442 | static void filter_guest_per_event(struct kvm_vcpu *vcpu) | ||
443 | { | ||
444 | u32 perc = vcpu->arch.sie_block->perc << 24; | ||
445 | u64 peraddr = vcpu->arch.sie_block->peraddr; | ||
446 | u64 addr = vcpu->arch.sie_block->gpsw.addr; | ||
447 | u64 cr9 = vcpu->arch.sie_block->gcr[9]; | ||
448 | u64 cr10 = vcpu->arch.sie_block->gcr[10]; | ||
449 | u64 cr11 = vcpu->arch.sie_block->gcr[11]; | ||
450 | /* filter all events demanded by the guest */ | ||
451 | u32 guest_perc = perc & cr9 & PER_EVENT_MASK; | ||
452 | |||
453 | if (!guest_per_enabled(vcpu)) | ||
454 | guest_perc = 0; | ||
455 | |||
456 | /* filter "successful-branching" events */ | ||
457 | if (guest_perc & PER_EVENT_BRANCH && | ||
458 | cr9 & PER_CONTROL_BRANCH_ADDRESS && | ||
459 | !in_addr_range(addr, cr10, cr11)) | ||
460 | guest_perc &= ~PER_EVENT_BRANCH; | ||
461 | |||
462 | /* filter "instruction-fetching" events */ | ||
463 | if (guest_perc & PER_EVENT_IFETCH && | ||
464 | !in_addr_range(peraddr, cr10, cr11)) | ||
465 | guest_perc &= ~PER_EVENT_IFETCH; | ||
466 | |||
467 | /* All other PER events will be given to the guest */ | ||
468 | /* TODO: Check altered address/address space */ | ||
469 | |||
470 | vcpu->arch.sie_block->perc = guest_perc >> 24; | ||
471 | |||
472 | if (!guest_perc) | ||
473 | vcpu->arch.sie_block->iprcc &= ~PGM_PER; | ||
474 | } | ||
475 | |||
476 | void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu) | ||
477 | { | ||
478 | if (debug_exit_required(vcpu)) | ||
479 | vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING; | ||
480 | |||
481 | filter_guest_per_event(vcpu); | ||
482 | } | ||
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index eeb1ac7d8fa4..a0b586c1913c 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * in-kernel handling for sie intercepts | 2 | * in-kernel handling for sie intercepts |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008, 2009 | 4 | * Copyright IBM Corp. 2008, 2014 |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License (version 2 only) | 7 | * it under the terms of the GNU General Public License (version 2 only) |
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/pagemap.h> | 16 | #include <linux/pagemap.h> |
17 | 17 | ||
18 | #include <asm/kvm_host.h> | 18 | #include <asm/kvm_host.h> |
19 | #include <asm/asm-offsets.h> | ||
20 | #include <asm/irq.h> | ||
19 | 21 | ||
20 | #include "kvm-s390.h" | 22 | #include "kvm-s390.h" |
21 | #include "gaccess.h" | 23 | #include "gaccess.h" |
@@ -29,6 +31,7 @@ static const intercept_handler_t instruction_handlers[256] = { | |||
29 | [0x83] = kvm_s390_handle_diag, | 31 | [0x83] = kvm_s390_handle_diag, |
30 | [0xae] = kvm_s390_handle_sigp, | 32 | [0xae] = kvm_s390_handle_sigp, |
31 | [0xb2] = kvm_s390_handle_b2, | 33 | [0xb2] = kvm_s390_handle_b2, |
34 | [0xb6] = kvm_s390_handle_stctl, | ||
32 | [0xb7] = kvm_s390_handle_lctl, | 35 | [0xb7] = kvm_s390_handle_lctl, |
33 | [0xb9] = kvm_s390_handle_b9, | 36 | [0xb9] = kvm_s390_handle_b9, |
34 | [0xe5] = kvm_s390_handle_e5, | 37 | [0xe5] = kvm_s390_handle_e5, |
@@ -44,9 +47,6 @@ static int handle_noop(struct kvm_vcpu *vcpu) | |||
44 | case 0x10: | 47 | case 0x10: |
45 | vcpu->stat.exit_external_request++; | 48 | vcpu->stat.exit_external_request++; |
46 | break; | 49 | break; |
47 | case 0x14: | ||
48 | vcpu->stat.exit_external_interrupt++; | ||
49 | break; | ||
50 | default: | 50 | default: |
51 | break; /* nothing */ | 51 | break; /* nothing */ |
52 | } | 52 | } |
@@ -63,8 +63,7 @@ static int handle_stop(struct kvm_vcpu *vcpu) | |||
63 | trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); | 63 | trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); |
64 | 64 | ||
65 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { | 65 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { |
66 | atomic_set_mask(CPUSTAT_STOPPED, | 66 | kvm_s390_vcpu_stop(vcpu); |
67 | &vcpu->arch.sie_block->cpuflags); | ||
68 | vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; | 67 | vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; |
69 | VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); | 68 | VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); |
70 | rc = -EOPNOTSUPP; | 69 | rc = -EOPNOTSUPP; |
@@ -109,22 +108,120 @@ static int handle_instruction(struct kvm_vcpu *vcpu) | |||
109 | return -EOPNOTSUPP; | 108 | return -EOPNOTSUPP; |
110 | } | 109 | } |
111 | 110 | ||
111 | static void __extract_prog_irq(struct kvm_vcpu *vcpu, | ||
112 | struct kvm_s390_pgm_info *pgm_info) | ||
113 | { | ||
114 | memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info)); | ||
115 | pgm_info->code = vcpu->arch.sie_block->iprcc; | ||
116 | |||
117 | switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) { | ||
118 | case PGM_AFX_TRANSLATION: | ||
119 | case PGM_ASX_TRANSLATION: | ||
120 | case PGM_EX_TRANSLATION: | ||
121 | case PGM_LFX_TRANSLATION: | ||
122 | case PGM_LSTE_SEQUENCE: | ||
123 | case PGM_LSX_TRANSLATION: | ||
124 | case PGM_LX_TRANSLATION: | ||
125 | case PGM_PRIMARY_AUTHORITY: | ||
126 | case PGM_SECONDARY_AUTHORITY: | ||
127 | case PGM_SPACE_SWITCH: | ||
128 | pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; | ||
129 | break; | ||
130 | case PGM_ALEN_TRANSLATION: | ||
131 | case PGM_ALE_SEQUENCE: | ||
132 | case PGM_ASTE_INSTANCE: | ||
133 | case PGM_ASTE_SEQUENCE: | ||
134 | case PGM_ASTE_VALIDITY: | ||
135 | case PGM_EXTENDED_AUTHORITY: | ||
136 | pgm_info->exc_access_id = vcpu->arch.sie_block->eai; | ||
137 | break; | ||
138 | case PGM_ASCE_TYPE: | ||
139 | case PGM_PAGE_TRANSLATION: | ||
140 | case PGM_REGION_FIRST_TRANS: | ||
141 | case PGM_REGION_SECOND_TRANS: | ||
142 | case PGM_REGION_THIRD_TRANS: | ||
143 | case PGM_SEGMENT_TRANSLATION: | ||
144 | pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; | ||
145 | pgm_info->exc_access_id = vcpu->arch.sie_block->eai; | ||
146 | pgm_info->op_access_id = vcpu->arch.sie_block->oai; | ||
147 | break; | ||
148 | case PGM_MONITOR: | ||
149 | pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn; | ||
150 | pgm_info->mon_code = vcpu->arch.sie_block->tecmc; | ||
151 | break; | ||
152 | case PGM_DATA: | ||
153 | pgm_info->data_exc_code = vcpu->arch.sie_block->dxc; | ||
154 | break; | ||
155 | case PGM_PROTECTION: | ||
156 | pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; | ||
157 | pgm_info->exc_access_id = vcpu->arch.sie_block->eai; | ||
158 | break; | ||
159 | default: | ||
160 | break; | ||
161 | } | ||
162 | |||
163 | if (vcpu->arch.sie_block->iprcc & PGM_PER) { | ||
164 | pgm_info->per_code = vcpu->arch.sie_block->perc; | ||
165 | pgm_info->per_atmid = vcpu->arch.sie_block->peratmid; | ||
166 | pgm_info->per_address = vcpu->arch.sie_block->peraddr; | ||
167 | pgm_info->per_access_id = vcpu->arch.sie_block->peraid; | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * restore ITDB to program-interruption TDB in guest lowcore | ||
173 | * and set TX abort indication if required | ||
174 | */ | ||
175 | static int handle_itdb(struct kvm_vcpu *vcpu) | ||
176 | { | ||
177 | struct kvm_s390_itdb *itdb; | ||
178 | int rc; | ||
179 | |||
180 | if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu)) | ||
181 | return 0; | ||
182 | if (current->thread.per_flags & PER_FLAG_NO_TE) | ||
183 | return 0; | ||
184 | itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba; | ||
185 | rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb)); | ||
186 | if (rc) | ||
187 | return rc; | ||
188 | memset(itdb, 0, sizeof(*itdb)); | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER) | ||
194 | |||
112 | static int handle_prog(struct kvm_vcpu *vcpu) | 195 | static int handle_prog(struct kvm_vcpu *vcpu) |
113 | { | 196 | { |
197 | struct kvm_s390_pgm_info pgm_info; | ||
198 | psw_t psw; | ||
199 | int rc; | ||
200 | |||
114 | vcpu->stat.exit_program_interruption++; | 201 | vcpu->stat.exit_program_interruption++; |
115 | 202 | ||
116 | /* Restore ITDB to Program-Interruption TDB in guest memory */ | 203 | if (guestdbg_enabled(vcpu) && per_event(vcpu)) { |
117 | if (IS_TE_ENABLED(vcpu) && | 204 | kvm_s390_handle_per_event(vcpu); |
118 | !(current->thread.per_flags & PER_FLAG_NO_TE) && | 205 | /* the interrupt might have been filtered out completely */ |
119 | IS_ITDB_VALID(vcpu)) { | 206 | if (vcpu->arch.sie_block->iprcc == 0) |
120 | copy_to_guest(vcpu, TDB_ADDR, vcpu->arch.sie_block->itdba, | 207 | return 0; |
121 | sizeof(struct kvm_s390_itdb)); | ||
122 | memset((void *) vcpu->arch.sie_block->itdba, 0, | ||
123 | sizeof(struct kvm_s390_itdb)); | ||
124 | } | 208 | } |
125 | 209 | ||
126 | trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); | 210 | trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); |
127 | return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc); | 211 | if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) { |
212 | rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t)); | ||
213 | if (rc) | ||
214 | return rc; | ||
215 | /* Avoid endless loops of specification exceptions */ | ||
216 | if (!is_valid_psw(&psw)) | ||
217 | return -EOPNOTSUPP; | ||
218 | } | ||
219 | rc = handle_itdb(vcpu); | ||
220 | if (rc) | ||
221 | return rc; | ||
222 | |||
223 | __extract_prog_irq(vcpu, &pgm_info); | ||
224 | return kvm_s390_inject_prog_irq(vcpu, &pgm_info); | ||
128 | } | 225 | } |
129 | 226 | ||
130 | static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) | 227 | static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) |
@@ -142,17 +239,110 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) | |||
142 | return rc2; | 239 | return rc2; |
143 | } | 240 | } |
144 | 241 | ||
242 | /** | ||
243 | * handle_external_interrupt - used for external interruption interceptions | ||
244 | * | ||
245 | * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if | ||
246 | * the new PSW does not have external interrupts disabled. In the first case, | ||
247 | * we have to deliver the interrupt manually, and in the second case, we | ||
248 | * drop to userspace to handle the situation there. | ||
249 | */ | ||
250 | static int handle_external_interrupt(struct kvm_vcpu *vcpu) | ||
251 | { | ||
252 | u16 eic = vcpu->arch.sie_block->eic; | ||
253 | struct kvm_s390_interrupt irq; | ||
254 | psw_t newpsw; | ||
255 | int rc; | ||
256 | |||
257 | vcpu->stat.exit_external_interrupt++; | ||
258 | |||
259 | rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t)); | ||
260 | if (rc) | ||
261 | return rc; | ||
262 | /* We cannot handle clock-comparator or timer interrupts with a bad PSW */ | ||
263 | if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) && | ||
264 | (newpsw.mask & PSW_MASK_EXT)) | ||
265 | return -EOPNOTSUPP; | ||
266 | |||
267 | switch (eic) { | ||
268 | case EXT_IRQ_CLK_COMP: | ||
269 | irq.type = KVM_S390_INT_CLOCK_COMP; | ||
270 | break; | ||
271 | case EXT_IRQ_CPU_TIMER: | ||
272 | irq.type = KVM_S390_INT_CPU_TIMER; | ||
273 | break; | ||
274 | case EXT_IRQ_EXTERNAL_CALL: | ||
275 | if (kvm_s390_si_ext_call_pending(vcpu)) | ||
276 | return 0; | ||
277 | irq.type = KVM_S390_INT_EXTERNAL_CALL; | ||
278 | irq.parm = vcpu->arch.sie_block->extcpuaddr; | ||
279 | break; | ||
280 | default: | ||
281 | return -EOPNOTSUPP; | ||
282 | } | ||
283 | |||
284 | return kvm_s390_inject_vcpu(vcpu, &irq); | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * Handle MOVE PAGE partial execution interception. | ||
289 | * | ||
290 | * This interception can only happen for guests with DAT disabled and | ||
291 | * addresses that are currently not mapped in the host. Thus we try to | ||
292 | * set up the mappings for the corresponding user pages here (or throw | ||
293 | * addressing exceptions in case of illegal guest addresses). | ||
294 | */ | ||
295 | static int handle_mvpg_pei(struct kvm_vcpu *vcpu) | ||
296 | { | ||
297 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
298 | unsigned long srcaddr, dstaddr; | ||
299 | int reg1, reg2, rc; | ||
300 | |||
301 | kvm_s390_get_regs_rre(vcpu, ®1, ®2); | ||
302 | |||
303 | /* Make sure that the source is paged-in */ | ||
304 | srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]); | ||
305 | if (kvm_is_error_gpa(vcpu->kvm, srcaddr)) | ||
306 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
307 | rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); | ||
308 | if (rc != 0) | ||
309 | return rc; | ||
310 | |||
311 | /* Make sure that the destination is paged-in */ | ||
312 | dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]); | ||
313 | if (kvm_is_error_gpa(vcpu->kvm, dstaddr)) | ||
314 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
315 | rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1); | ||
316 | if (rc != 0) | ||
317 | return rc; | ||
318 | |||
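| /* MVPG is a 4-byte instruction; rewind the PSW so the guest retries it */ | ||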
319 | psw->addr = __rewind_psw(*psw, 4); | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int handle_partial_execution(struct kvm_vcpu *vcpu) | ||
325 | { | ||
326 | if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */ | ||
327 | return handle_mvpg_pei(vcpu); | ||
328 | if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */ | ||
329 | return kvm_s390_handle_sigp_pei(vcpu); | ||
330 | |||
331 | return -EOPNOTSUPP; | ||
332 | } | ||
333 | |||
145 | static const intercept_handler_t intercept_funcs[] = { | 334 | static const intercept_handler_t intercept_funcs[] = { |
146 | [0x00 >> 2] = handle_noop, | 335 | [0x00 >> 2] = handle_noop, |
147 | [0x04 >> 2] = handle_instruction, | 336 | [0x04 >> 2] = handle_instruction, |
148 | [0x08 >> 2] = handle_prog, | 337 | [0x08 >> 2] = handle_prog, |
149 | [0x0C >> 2] = handle_instruction_and_prog, | 338 | [0x0C >> 2] = handle_instruction_and_prog, |
150 | [0x10 >> 2] = handle_noop, | 339 | [0x10 >> 2] = handle_noop, |
151 | [0x14 >> 2] = handle_noop, | 340 | [0x14 >> 2] = handle_external_interrupt, |
152 | [0x18 >> 2] = handle_noop, | 341 | [0x18 >> 2] = handle_noop, |
153 | [0x1C >> 2] = kvm_s390_handle_wait, | 342 | [0x1C >> 2] = kvm_s390_handle_wait, |
154 | [0x20 >> 2] = handle_validity, | 343 | [0x20 >> 2] = handle_validity, |
155 | [0x28 >> 2] = handle_stop, | 344 | [0x28 >> 2] = handle_stop, |
345 | [0x38 >> 2] = handle_partial_execution, | ||
156 | }; | 346 | }; |
157 | 347 | ||
158 | int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) | 348 | int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 200a8f9390b6..90c8de22a2a0 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #define IOINT_CSSID_MASK 0x03fc0000 | 27 | #define IOINT_CSSID_MASK 0x03fc0000 |
28 | #define IOINT_AI_MASK 0x04000000 | 28 | #define IOINT_AI_MASK 0x04000000 |
29 | 29 | ||
30 | static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu); | ||
31 | |||
30 | static int is_ioint(u64 type) | 32 | static int is_ioint(u64 type) |
31 | { | 33 | { |
32 | return ((type & 0xfffe0000u) != 0xfffe0000u); | 34 | return ((type & 0xfffe0000u) != 0xfffe0000u); |
@@ -56,6 +58,17 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) | |||
56 | return 1; | 58 | return 1; |
57 | } | 59 | } |
58 | 60 | ||
61 | static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) | ||
62 | { | ||
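| /* 0x800ul in CR0 is the clock-comparator interruption submask */ | ||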
63 | if (psw_extint_disabled(vcpu) || | ||
64 | !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) | ||
65 | return 0; | ||
66 | if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) | ||
67 | /* No timer interrupts when single stepping */ | ||
68 | return 0; | ||
69 | return 1; | ||
70 | } | ||
71 | |||
59 | static u64 int_word_to_isc_bits(u32 int_word) | 72 | static u64 int_word_to_isc_bits(u32 int_word) |
60 | { | 73 | { |
61 | u8 isc = (int_word & 0x38000000) >> 27; | 74 | u8 isc = (int_word & 0x38000000) >> 27; |
@@ -78,6 +91,14 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, | |||
78 | if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) | 91 | if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) |
79 | return 1; | 92 | return 1; |
80 | return 0; | 93 | return 0; |
94 | case KVM_S390_INT_CLOCK_COMP: | ||
95 | return ckc_interrupts_enabled(vcpu); | ||
96 | case KVM_S390_INT_CPU_TIMER: | ||
97 | if (psw_extint_disabled(vcpu)) | ||
98 | return 0; | ||
99 | if (vcpu->arch.sie_block->gcr[0] & 0x400ul) | ||
100 | return 1; | ||
101 | return 0; | ||
81 | case KVM_S390_INT_SERVICE: | 102 | case KVM_S390_INT_SERVICE: |
82 | case KVM_S390_INT_PFAULT_INIT: | 103 | case KVM_S390_INT_PFAULT_INIT: |
83 | case KVM_S390_INT_PFAULT_DONE: | 104 | case KVM_S390_INT_PFAULT_DONE: |
@@ -127,11 +148,16 @@ static void __unset_cpu_idle(struct kvm_vcpu *vcpu) | |||
127 | 148 | ||
128 | static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) | 149 | static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) |
129 | { | 150 | { |
130 | atomic_clear_mask(CPUSTAT_ECALL_PEND | | 151 | atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, |
131 | CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, | 152 | &vcpu->arch.sie_block->cpuflags); |
132 | &vcpu->arch.sie_block->cpuflags); | ||
133 | vcpu->arch.sie_block->lctl = 0x0000; | 153 | vcpu->arch.sie_block->lctl = 0x0000; |
134 | vcpu->arch.sie_block->ictl &= ~ICTL_LPSW; | 154 | vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); |
155 | |||
156 | if (guestdbg_enabled(vcpu)) { | ||
157 | vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | | ||
158 | LCTL_CR10 | LCTL_CR11); | ||
159 | vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); | ||
160 | } | ||
135 | } | 161 | } |
136 | 162 | ||
137 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) | 163 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) |
@@ -149,6 +175,8 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu, | |||
149 | case KVM_S390_INT_PFAULT_INIT: | 175 | case KVM_S390_INT_PFAULT_INIT: |
150 | case KVM_S390_INT_PFAULT_DONE: | 176 | case KVM_S390_INT_PFAULT_DONE: |
151 | case KVM_S390_INT_VIRTIO: | 177 | case KVM_S390_INT_VIRTIO: |
178 | case KVM_S390_INT_CLOCK_COMP: | ||
179 | case KVM_S390_INT_CPU_TIMER: | ||
152 | if (psw_extint_disabled(vcpu)) | 180 | if (psw_extint_disabled(vcpu)) |
153 | __set_cpuflag(vcpu, CPUSTAT_EXT_INT); | 181 | __set_cpuflag(vcpu, CPUSTAT_EXT_INT); |
154 | else | 182 | else |
@@ -174,6 +202,106 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu, | |||
174 | } | 202 | } |
175 | } | 203 | } |
176 | 204 | ||
205 | static int __deliver_prog_irq(struct kvm_vcpu *vcpu, | ||
206 | struct kvm_s390_pgm_info *pgm_info) | ||
207 | { | ||
208 | const unsigned short table[] = { 2, 4, 4, 6 }; | ||
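| /* instruction length by the two leftmost opcode bits: 00->2, 01/10->4, 11->6 */ | ||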
209 | int rc = 0; | ||
210 | |||
211 | switch (pgm_info->code & ~PGM_PER) { | ||
212 | case PGM_AFX_TRANSLATION: | ||
213 | case PGM_ASX_TRANSLATION: | ||
214 | case PGM_EX_TRANSLATION: | ||
215 | case PGM_LFX_TRANSLATION: | ||
216 | case PGM_LSTE_SEQUENCE: | ||
217 | case PGM_LSX_TRANSLATION: | ||
218 | case PGM_LX_TRANSLATION: | ||
219 | case PGM_PRIMARY_AUTHORITY: | ||
220 | case PGM_SECONDARY_AUTHORITY: | ||
221 | case PGM_SPACE_SWITCH: | ||
222 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | ||
223 | (u64 *)__LC_TRANS_EXC_CODE); | ||
224 | break; | ||
225 | case PGM_ALEN_TRANSLATION: | ||
226 | case PGM_ALE_SEQUENCE: | ||
227 | case PGM_ASTE_INSTANCE: | ||
228 | case PGM_ASTE_SEQUENCE: | ||
229 | case PGM_ASTE_VALIDITY: | ||
230 | case PGM_EXTENDED_AUTHORITY: | ||
231 | rc = put_guest_lc(vcpu, pgm_info->exc_access_id, | ||
232 | (u8 *)__LC_EXC_ACCESS_ID); | ||
233 | break; | ||
234 | case PGM_ASCE_TYPE: | ||
235 | case PGM_PAGE_TRANSLATION: | ||
236 | case PGM_REGION_FIRST_TRANS: | ||
237 | case PGM_REGION_SECOND_TRANS: | ||
238 | case PGM_REGION_THIRD_TRANS: | ||
239 | case PGM_SEGMENT_TRANSLATION: | ||
240 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | ||
241 | (u64 *)__LC_TRANS_EXC_CODE); | ||
242 | rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, | ||
243 | (u8 *)__LC_EXC_ACCESS_ID); | ||
244 | rc |= put_guest_lc(vcpu, pgm_info->op_access_id, | ||
245 | (u8 *)__LC_OP_ACCESS_ID); | ||
246 | break; | ||
247 | case PGM_MONITOR: | ||
248 | rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, | ||
249 | (u64 *)__LC_MON_CLASS_NR); | ||
250 | rc |= put_guest_lc(vcpu, pgm_info->mon_code, | ||
251 | (u64 *)__LC_MON_CODE); | ||
252 | break; | ||
253 | case PGM_DATA: | ||
254 | rc = put_guest_lc(vcpu, pgm_info->data_exc_code, | ||
255 | (u32 *)__LC_DATA_EXC_CODE); | ||
256 | break; | ||
257 | case PGM_PROTECTION: | ||
258 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | ||
259 | (u64 *)__LC_TRANS_EXC_CODE); | ||
260 | rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, | ||
261 | (u8 *)__LC_EXC_ACCESS_ID); | ||
262 | break; | ||
263 | } | ||
264 | |||
265 | if (pgm_info->code & PGM_PER) { | ||
266 | rc |= put_guest_lc(vcpu, pgm_info->per_code, | ||
267 | (u8 *) __LC_PER_CODE); | ||
268 | rc |= put_guest_lc(vcpu, pgm_info->per_atmid, | ||
269 | (u8 *)__LC_PER_ATMID); | ||
270 | rc |= put_guest_lc(vcpu, pgm_info->per_address, | ||
271 | (u64 *) __LC_PER_ADDRESS); | ||
272 | rc |= put_guest_lc(vcpu, pgm_info->per_access_id, | ||
273 | (u8 *) __LC_PER_ACCESS_ID); | ||
274 | } | ||
275 | |||
276 | switch (vcpu->arch.sie_block->icptcode) { | ||
277 | case ICPT_INST: | ||
278 | case ICPT_INSTPROGI: | ||
279 | case ICPT_OPEREXC: | ||
280 | case ICPT_PARTEXEC: | ||
281 | case ICPT_IOINST: | ||
282 | /* last instruction only stored for these icptcodes */ | ||
283 | rc |= put_guest_lc(vcpu, table[vcpu->arch.sie_block->ipa >> 14], | ||
284 | (u16 *) __LC_PGM_ILC); | ||
285 | break; | ||
286 | case ICPT_PROGI: | ||
287 | rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->pgmilc, | ||
288 | (u16 *) __LC_PGM_ILC); | ||
289 | break; | ||
290 | default: | ||
291 | rc |= put_guest_lc(vcpu, 0, | ||
292 | (u16 *) __LC_PGM_ILC); | ||
293 | } | ||
294 | |||
295 | rc |= put_guest_lc(vcpu, pgm_info->code, | ||
296 | (u16 *)__LC_PGM_INT_CODE); | ||
297 | rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, | ||
298 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
299 | rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, | ||
300 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
301 | |||
302 | return rc; | ||
303 | } | ||
304 | |||
177 | static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | 305 | static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, |
178 | struct kvm_s390_interrupt_info *inti) | 306 | struct kvm_s390_interrupt_info *inti) |
179 | { | 307 | { |
@@ -186,26 +314,46 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
186 | vcpu->stat.deliver_emergency_signal++; | 314 | vcpu->stat.deliver_emergency_signal++; |
187 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 315 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
188 | inti->emerg.code, 0); | 316 | inti->emerg.code, 0); |
189 | rc = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE); | 317 | rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE); |
190 | rc |= put_guest(vcpu, inti->emerg.code, | 318 | rc |= put_guest_lc(vcpu, inti->emerg.code, |
191 | (u16 __user *)__LC_EXT_CPU_ADDR); | 319 | (u16 *)__LC_EXT_CPU_ADDR); |
192 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 320 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
321 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
322 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
193 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 323 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
194 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
195 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | ||
196 | break; | 324 | break; |
197 | case KVM_S390_INT_EXTERNAL_CALL: | 325 | case KVM_S390_INT_EXTERNAL_CALL: |
198 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); | 326 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); |
199 | vcpu->stat.deliver_external_call++; | 327 | vcpu->stat.deliver_external_call++; |
200 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 328 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
201 | inti->extcall.code, 0); | 329 | inti->extcall.code, 0); |
202 | rc = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE); | 330 | rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE); |
203 | rc |= put_guest(vcpu, inti->extcall.code, | 331 | rc |= put_guest_lc(vcpu, inti->extcall.code, |
204 | (u16 __user *)__LC_EXT_CPU_ADDR); | 332 | (u16 *)__LC_EXT_CPU_ADDR); |
205 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 333 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
334 | &vcpu->arch.sie_block->gpsw, | ||
335 | sizeof(psw_t)); | ||
336 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
337 | &vcpu->arch.sie_block->gpsw, | ||
338 | sizeof(psw_t)); | ||
339 | break; | ||
340 | case KVM_S390_INT_CLOCK_COMP: | ||
341 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | ||
342 | inti->ext.ext_params, 0); | ||
343 | deliver_ckc_interrupt(vcpu); | ||
344 | break; | ||
345 | case KVM_S390_INT_CPU_TIMER: | ||
346 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | ||
347 | inti->ext.ext_params, 0); | ||
348 | rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, | ||
349 | (u16 *)__LC_EXT_INT_CODE); | ||
350 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | ||
351 | &vcpu->arch.sie_block->gpsw, | ||
352 | sizeof(psw_t)); | ||
353 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
206 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 354 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
207 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 355 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, |
208 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 356 | (u32 *)__LC_EXT_PARAMS); |
209 | break; | 357 | break; |
210 | case KVM_S390_INT_SERVICE: | 358 | case KVM_S390_INT_SERVICE: |
211 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", | 359 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", |
@@ -213,37 +361,39 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
213 | vcpu->stat.deliver_service_signal++; | 361 | vcpu->stat.deliver_service_signal++; |
214 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 362 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
215 | inti->ext.ext_params, 0); | 363 | inti->ext.ext_params, 0); |
216 | rc = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE); | 364 | rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE); |
217 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 365 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
366 | &vcpu->arch.sie_block->gpsw, | ||
367 | sizeof(psw_t)); | ||
368 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
218 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 369 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
219 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 370 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, |
220 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 371 | (u32 *)__LC_EXT_PARAMS); |
221 | rc |= put_guest(vcpu, inti->ext.ext_params, | ||
222 | (u32 __user *)__LC_EXT_PARAMS); | ||
223 | break; | 372 | break; |
224 | case KVM_S390_INT_PFAULT_INIT: | 373 | case KVM_S390_INT_PFAULT_INIT: |
225 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, | 374 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, |
226 | inti->ext.ext_params2); | 375 | inti->ext.ext_params2); |
227 | rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE); | 376 | rc = put_guest_lc(vcpu, 0x2603, (u16 *) __LC_EXT_INT_CODE); |
228 | rc |= put_guest(vcpu, 0x0600, (u16 __user *) __LC_EXT_CPU_ADDR); | 377 | rc |= put_guest_lc(vcpu, 0x0600, (u16 *) __LC_EXT_CPU_ADDR); |
229 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 378 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
379 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
380 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
230 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 381 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
231 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 382 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, |
232 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 383 | (u64 *) __LC_EXT_PARAMS2); |
233 | rc |= put_guest(vcpu, inti->ext.ext_params2, | ||
234 | (u64 __user *) __LC_EXT_PARAMS2); | ||
235 | break; | 384 | break; |
236 | case KVM_S390_INT_PFAULT_DONE: | 385 | case KVM_S390_INT_PFAULT_DONE: |
237 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, | 386 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, |
238 | inti->ext.ext_params2); | 387 | inti->ext.ext_params2); |
239 | rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE); | 388 | rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); |
240 | rc |= put_guest(vcpu, 0x0680, (u16 __user *) __LC_EXT_CPU_ADDR); | 389 | rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR); |
241 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 390 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
391 | &vcpu->arch.sie_block->gpsw, | ||
392 | sizeof(psw_t)); | ||
393 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
242 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 394 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
243 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 395 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, |
244 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 396 | (u64 *)__LC_EXT_PARAMS2); |
245 | rc |= put_guest(vcpu, inti->ext.ext_params2, | ||
246 | (u64 __user *) __LC_EXT_PARAMS2); | ||
247 | break; | 397 | break; |
248 | case KVM_S390_INT_VIRTIO: | 398 | case KVM_S390_INT_VIRTIO: |
249 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", | 399 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", |
@@ -252,16 +402,17 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
252 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 402 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
253 | inti->ext.ext_params, | 403 | inti->ext.ext_params, |
254 | inti->ext.ext_params2); | 404 | inti->ext.ext_params2); |
255 | rc = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE); | 405 | rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); |
256 | rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR); | 406 | rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR); |
257 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 407 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
408 | &vcpu->arch.sie_block->gpsw, | ||
409 | sizeof(psw_t)); | ||
410 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
258 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 411 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
259 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 412 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, |
260 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 413 | (u32 *)__LC_EXT_PARAMS); |
261 | rc |= put_guest(vcpu, inti->ext.ext_params, | 414 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, |
262 | (u32 __user *)__LC_EXT_PARAMS); | 415 | (u64 *)__LC_EXT_PARAMS2); |
263 | rc |= put_guest(vcpu, inti->ext.ext_params2, | ||
264 | (u64 __user *)__LC_EXT_PARAMS2); | ||
265 | break; | 416 | break; |
266 | case KVM_S390_SIGP_STOP: | 417 | case KVM_S390_SIGP_STOP: |
267 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); | 418 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); |
@@ -285,13 +436,12 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
285 | vcpu->stat.deliver_restart_signal++; | 436 | vcpu->stat.deliver_restart_signal++; |
286 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 437 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
287 | 0, 0); | 438 | 0, 0); |
288 | rc = copy_to_guest(vcpu, | 439 | rc = write_guest_lc(vcpu, |
289 | offsetof(struct _lowcore, restart_old_psw), | 440 | offsetof(struct _lowcore, restart_old_psw), |
290 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 441 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
291 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 442 | rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), |
292 | offsetof(struct _lowcore, restart_psw), | 443 | &vcpu->arch.sie_block->gpsw, |
293 | sizeof(psw_t)); | 444 | sizeof(psw_t)); |
294 | atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | ||
295 | break; | 445 | break; |
296 | case KVM_S390_PROGRAM_INT: | 446 | case KVM_S390_PROGRAM_INT: |
297 | VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", | 447 | VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", |
@@ -300,13 +450,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
300 | vcpu->stat.deliver_program_int++; | 450 | vcpu->stat.deliver_program_int++; |
301 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 451 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
302 | inti->pgm.code, 0); | 452 | inti->pgm.code, 0); |
303 | rc = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE); | 453 | rc = __deliver_prog_irq(vcpu, &inti->pgm); |
304 | rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14], | ||
305 | (u16 __user *)__LC_PGM_ILC); | ||
306 | rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW, | ||
307 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
308 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
309 | __LC_PGM_NEW_PSW, sizeof(psw_t)); | ||
310 | break; | 454 | break; |
311 | 455 | ||
312 | case KVM_S390_MCHK: | 456 | case KVM_S390_MCHK: |
@@ -317,11 +461,12 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
317 | inti->mchk.mcic); | 461 | inti->mchk.mcic); |
318 | rc = kvm_s390_vcpu_store_status(vcpu, | 462 | rc = kvm_s390_vcpu_store_status(vcpu, |
319 | KVM_S390_STORE_STATUS_PREFIXED); | 463 | KVM_S390_STORE_STATUS_PREFIXED); |
320 | rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE); | 464 | rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE); |
321 | rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW, | 465 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, |
466 | &vcpu->arch.sie_block->gpsw, | ||
467 | sizeof(psw_t)); | ||
468 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | ||
322 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 469 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
323 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
324 | __LC_MCK_NEW_PSW, sizeof(psw_t)); | ||
325 | break; | 470 | break; |
326 | 471 | ||
327 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 472 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
@@ -334,18 +479,20 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
334 | vcpu->stat.deliver_io_int++; | 479 | vcpu->stat.deliver_io_int++; |
335 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 480 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
336 | param0, param1); | 481 | param0, param1); |
337 | rc = put_guest(vcpu, inti->io.subchannel_id, | 482 | rc = put_guest_lc(vcpu, inti->io.subchannel_id, |
338 | (u16 __user *) __LC_SUBCHANNEL_ID); | 483 | (u16 *)__LC_SUBCHANNEL_ID); |
339 | rc |= put_guest(vcpu, inti->io.subchannel_nr, | 484 | rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, |
340 | (u16 __user *) __LC_SUBCHANNEL_NR); | 485 | (u16 *)__LC_SUBCHANNEL_NR); |
341 | rc |= put_guest(vcpu, inti->io.io_int_parm, | 486 | rc |= put_guest_lc(vcpu, inti->io.io_int_parm, |
342 | (u32 __user *) __LC_IO_INT_PARM); | 487 | (u32 *)__LC_IO_INT_PARM); |
343 | rc |= put_guest(vcpu, inti->io.io_int_word, | 488 | rc |= put_guest_lc(vcpu, inti->io.io_int_word, |
344 | (u32 __user *) __LC_IO_INT_WORD); | 489 | (u32 *)__LC_IO_INT_WORD); |
345 | rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW, | 490 | rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, |
346 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 491 | &vcpu->arch.sie_block->gpsw, |
347 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 492 | sizeof(psw_t)); |
348 | __LC_IO_NEW_PSW, sizeof(psw_t)); | 493 | rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, |
494 | &vcpu->arch.sie_block->gpsw, | ||
495 | sizeof(psw_t)); | ||
349 | break; | 496 | break; |
350 | } | 497 | } |
351 | default: | 498 | default: |
@@ -358,25 +505,35 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
358 | } | 505 | } |
359 | } | 506 | } |
360 | 507 | ||
361 | static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu) | 508 | static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu) |
362 | { | 509 | { |
363 | int rc; | 510 | int rc; |
364 | 511 | ||
365 | if (psw_extint_disabled(vcpu)) | 512 | rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); |
366 | return 0; | 513 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
367 | if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) | 514 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
368 | return 0; | 515 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
369 | rc = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); | 516 | &vcpu->arch.sie_block->gpsw, |
370 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 517 | sizeof(psw_t)); |
371 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
372 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
373 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | ||
374 | if (rc) { | 518 | if (rc) { |
375 | printk("kvm: The guest lowcore is not mapped during interrupt " | 519 | printk("kvm: The guest lowcore is not mapped during interrupt " |
376 | "delivery, killing userspace\n"); | 520 | "delivery, killing userspace\n"); |
377 | do_exit(SIGKILL); | 521 | do_exit(SIGKILL); |
378 | } | 522 | } |
379 | return 1; | 523 | } |
524 | |||
525 | /* Check whether the SIGP interpretation facility has an external call pending */ | ||
526 | int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu) | ||
527 | { | ||
528 | atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl; | ||
529 | |||
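| /* 0x2000ul in CR0 is the external-call interruption submask */ | ||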
530 | if (!psw_extint_disabled(vcpu) && | ||
531 | (vcpu->arch.sie_block->gcr[0] & 0x2000ul) && | ||
532 | (atomic_read(sigp_ctrl) & SIGP_CTRL_C) && | ||
533 | (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND)) | ||
534 | return 1; | ||
535 | |||
536 | return 0; | ||
380 | } | 537 | } |
381 | 538 | ||
382 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | 539 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) |
@@ -406,19 +563,23 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | |||
406 | spin_unlock(&fi->lock); | 563 | spin_unlock(&fi->lock); |
407 | } | 564 | } |
408 | 565 | ||
409 | if ((!rc) && (vcpu->arch.sie_block->ckc < | 566 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) |
410 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) { | 567 | rc = 1; |
411 | if ((!psw_extint_disabled(vcpu)) && | 568 | |
412 | (vcpu->arch.sie_block->gcr[0] & 0x800ul)) | 569 | if (!rc && kvm_s390_si_ext_call_pending(vcpu)) |
413 | rc = 1; | 570 | rc = 1; |
414 | } | ||
415 | 571 | ||
416 | return rc; | 572 | return rc; |
417 | } | 573 | } |
418 | 574 | ||
419 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 575 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
420 | { | 576 | { |
421 | return 0; | 577 | if (!(vcpu->arch.sie_block->ckc < |
578 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) | ||
579 | return 0; | ||
580 | if (!ckc_interrupts_enabled(vcpu)) | ||
581 | return 0; | ||
582 | return 1; | ||
422 | } | 583 | } |
423 | 584 | ||
424 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | 585 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) |
@@ -441,8 +602,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | |||
441 | return -EOPNOTSUPP; /* disabled wait */ | 602 | return -EOPNOTSUPP; /* disabled wait */ |
442 | } | 603 | } |
443 | 604 | ||
444 | if (psw_extint_disabled(vcpu) || | 605 | if (!ckc_interrupts_enabled(vcpu)) { |
445 | (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) { | ||
446 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); | 606 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); |
447 | goto no_timer; | 607 | goto no_timer; |
448 | } | 608 | } |
@@ -465,7 +625,8 @@ no_timer: | |||
465 | while (list_empty(&vcpu->arch.local_int.list) && | 625 | while (list_empty(&vcpu->arch.local_int.list) && |
466 | list_empty(&vcpu->arch.local_int.float_int->list) && | 626 | list_empty(&vcpu->arch.local_int.float_int->list) && |
467 | (!vcpu->arch.local_int.timer_due) && | 627 | (!vcpu->arch.local_int.timer_due) && |
468 | !signal_pending(current)) { | 628 | !signal_pending(current) && |
629 | !kvm_s390_si_ext_call_pending(vcpu)) { | ||
469 | set_current_state(TASK_INTERRUPTIBLE); | 630 | set_current_state(TASK_INTERRUPTIBLE); |
470 | spin_unlock_bh(&vcpu->arch.local_int.lock); | 631 | spin_unlock_bh(&vcpu->arch.local_int.lock); |
471 | spin_unlock(&vcpu->arch.local_int.float_int->lock); | 632 | spin_unlock(&vcpu->arch.local_int.float_int->lock); |
@@ -522,6 +683,11 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
522 | } | 683 | } |
523 | atomic_set(&li->active, 0); | 684 | atomic_set(&li->active, 0); |
524 | spin_unlock_bh(&li->lock); | 685 | spin_unlock_bh(&li->lock); |
686 | |||
687 | /* clear pending external calls set by sigp interpretation facility */ | ||
688 | atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); | ||
689 | atomic_clear_mask(SIGP_CTRL_C, | ||
690 | &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl); | ||
525 | } | 691 | } |
526 | 692 | ||
527 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | 693 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) |
@@ -554,9 +720,8 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
554 | } while (deliver); | 720 | } while (deliver); |
555 | } | 721 | } |
556 | 722 | ||
557 | if ((vcpu->arch.sie_block->ckc < | 723 | if (kvm_cpu_has_pending_timer(vcpu)) |
558 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) | 724 | deliver_ckc_interrupt(vcpu); |
559 | __try_deliver_ckc_interrupt(vcpu); | ||
560 | 725 | ||
561 | if (atomic_read(&fi->active)) { | 726 | if (atomic_read(&fi->active)) { |
562 | do { | 727 | do { |
@@ -660,6 +825,31 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) | |||
660 | return 0; | 825 | return 0; |
661 | } | 826 | } |
662 | 827 | ||
828 | int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, | ||
829 | struct kvm_s390_pgm_info *pgm_info) | ||
830 | { | ||
831 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
832 | struct kvm_s390_interrupt_info *inti; | ||
833 | |||
834 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
835 | if (!inti) | ||
836 | return -ENOMEM; | ||
837 | |||
838 | VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", | ||
839 | pgm_info->code); | ||
840 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, | ||
841 | pgm_info->code, 0, 1); | ||
842 | |||
843 | inti->type = KVM_S390_PROGRAM_INT; | ||
844 | memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); | ||
845 | spin_lock_bh(&li->lock); | ||
846 | list_add(&inti->list, &li->list); | ||
847 | atomic_set(&li->active, 1); | ||
848 | BUG_ON(waitqueue_active(li->wq)); | ||
849 | spin_unlock_bh(&li->lock); | ||
850 | return 0; | ||
851 | } | ||
852 | |||
663 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 853 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
664 | u64 cr6, u64 schid) | 854 | u64 cr6, u64 schid) |
665 | { | 855 | { |
@@ -810,6 +1000,12 @@ int kvm_s390_inject_vm(struct kvm *kvm, | |||
810 | return __inject_vm(kvm, inti); | 1000 | return __inject_vm(kvm, inti); |
811 | } | 1001 | } |
812 | 1002 | ||
1003 | void kvm_s390_reinject_io_int(struct kvm *kvm, | ||
1004 | struct kvm_s390_interrupt_info *inti) | ||
1005 | { | ||
1006 | __inject_vm(kvm, inti); | ||
1007 | } | ||
1008 | |||
813 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | 1009 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, |
814 | struct kvm_s390_interrupt *s390int) | 1010 | struct kvm_s390_interrupt *s390int) |
815 | { | 1011 | { |
@@ -839,6 +1035,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
839 | break; | 1035 | break; |
840 | case KVM_S390_SIGP_STOP: | 1036 | case KVM_S390_SIGP_STOP: |
841 | case KVM_S390_RESTART: | 1037 | case KVM_S390_RESTART: |
1038 | case KVM_S390_INT_CLOCK_COMP: | ||
1039 | case KVM_S390_INT_CPU_TIMER: | ||
842 | VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); | 1040 | VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); |
843 | inti->type = s390int->type; | 1041 | inti->type = s390int->type; |
844 | break; | 1042 | break; |
@@ -900,7 +1098,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
900 | return 0; | 1098 | return 0; |
901 | } | 1099 | } |
902 | 1100 | ||
903 | static void clear_floating_interrupts(struct kvm *kvm) | 1101 | void kvm_s390_clear_float_irqs(struct kvm *kvm) |
904 | { | 1102 | { |
905 | struct kvm_s390_float_interrupt *fi; | 1103 | struct kvm_s390_float_interrupt *fi; |
906 | struct kvm_s390_interrupt_info *n, *inti = NULL; | 1104 | struct kvm_s390_interrupt_info *n, *inti = NULL; |
@@ -1246,7 +1444,7 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
1246 | break; | 1444 | break; |
1247 | case KVM_DEV_FLIC_CLEAR_IRQS: | 1445 | case KVM_DEV_FLIC_CLEAR_IRQS: |
1248 | r = 0; | 1446 | r = 0; |
1249 | clear_floating_interrupts(dev->kvm); | 1447 | kvm_s390_clear_float_irqs(dev->kvm); |
1250 | break; | 1448 | break; |
1251 | case KVM_DEV_FLIC_APF_ENABLE: | 1449 | case KVM_DEV_FLIC_APF_ENABLE: |
1252 | dev->kvm->arch.gmap->pfault_enabled = 1; | 1450 | dev->kvm->arch.gmap->pfault_enabled = 1; |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 825fe7bf95a6..2f3e14fe91a4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | 11 | * Christian Borntraeger <borntraeger@de.ibm.com> |
12 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 12 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
13 | * Christian Ehrhardt <ehrhardt@de.ibm.com> | 13 | * Christian Ehrhardt <ehrhardt@de.ibm.com> |
14 | * Jason J. Herne <jjherne@us.ibm.com> | ||
14 | */ | 15 | */ |
15 | 16 | ||
16 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
@@ -51,6 +52,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
51 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, | 52 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, |
52 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, | 53 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, |
53 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, | 54 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, |
55 | { "instruction_stctl", VCPU_STAT(instruction_stctl) }, | ||
56 | { "instruction_stctg", VCPU_STAT(instruction_stctg) }, | ||
54 | { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, | 57 | { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, |
55 | { "deliver_external_call", VCPU_STAT(deliver_external_call) }, | 58 | { "deliver_external_call", VCPU_STAT(deliver_external_call) }, |
56 | { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, | 59 | { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, |
@@ -66,6 +69,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
66 | { "instruction_stpx", VCPU_STAT(instruction_stpx) }, | 69 | { "instruction_stpx", VCPU_STAT(instruction_stpx) }, |
67 | { "instruction_stap", VCPU_STAT(instruction_stap) }, | 70 | { "instruction_stap", VCPU_STAT(instruction_stap) }, |
68 | { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, | 71 | { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, |
72 | { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) }, | ||
69 | { "instruction_stsch", VCPU_STAT(instruction_stsch) }, | 73 | { "instruction_stsch", VCPU_STAT(instruction_stsch) }, |
70 | { "instruction_chsc", VCPU_STAT(instruction_chsc) }, | 74 | { "instruction_chsc", VCPU_STAT(instruction_chsc) }, |
71 | { "instruction_essa", VCPU_STAT(instruction_essa) }, | 75 | { "instruction_essa", VCPU_STAT(instruction_essa) }, |
@@ -90,7 +94,7 @@ unsigned long *vfacilities; | |||
90 | static struct gmap_notifier gmap_notifier; | 94 | static struct gmap_notifier gmap_notifier; |
91 | 95 | ||
92 | /* test availability of vfacility */ | 96 | /* test availability of vfacility */ |
93 | static inline int test_vfacility(unsigned long nr) | 97 | int test_vfacility(unsigned long nr) |
94 | { | 98 | { |
95 | return __test_facility(nr, (void *) vfacilities); | 99 | return __test_facility(nr, (void *) vfacilities); |
96 | } | 100 | } |
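test_vfacility() loses its static/inline so handlers outside kvm-s390.c can use it too. The underlying __test_facility() treats the facility list as a big-endian bit string (bit 0 is the most significant bit of byte 0); a standalone sketch of that lookup, with facilities standing in for the vfacilities blob:

#include <stdint.h>

static inline int facility_bit_set(const uint8_t *facilities,
				   unsigned long nr)
{
	/* s390 numbers facility bits from the MSB of byte 0 downwards. */
	return (facilities[nr >> 3] >> (7 - (nr & 7))) & 1;
}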
@@ -162,6 +166,7 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
162 | case KVM_CAP_IOEVENTFD: | 166 | case KVM_CAP_IOEVENTFD: |
163 | case KVM_CAP_DEVICE_CTRL: | 167 | case KVM_CAP_DEVICE_CTRL: |
164 | case KVM_CAP_ENABLE_CAP_VM: | 168 | case KVM_CAP_ENABLE_CAP_VM: |
169 | case KVM_CAP_VM_ATTRIBUTES: | ||
165 | r = 1; | 170 | r = 1; |
166 | break; | 171 | break; |
167 | case KVM_CAP_NR_VCPUS: | 172 | case KVM_CAP_NR_VCPUS: |
@@ -180,6 +185,25 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
180 | return r; | 185 | return r; |
181 | } | 186 | } |
182 | 187 | ||
188 | static void kvm_s390_sync_dirty_log(struct kvm *kvm, | ||
189 | struct kvm_memory_slot *memslot) | ||
190 | { | ||
191 | gfn_t cur_gfn, last_gfn; | ||
192 | unsigned long address; | ||
193 | struct gmap *gmap = kvm->arch.gmap; | ||
194 | |||
195 | down_read(&gmap->mm->mmap_sem); | ||
196 | /* Loop over all guest pages */ | ||
197 | last_gfn = memslot->base_gfn + memslot->npages; | ||
198 | for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) { | ||
199 | address = gfn_to_hva_memslot(memslot, cur_gfn); | ||
200 | |||
201 | if (gmap_test_and_clear_dirty(address, gmap)) | ||
202 | mark_page_dirty(kvm, cur_gfn); | ||
203 | } | ||
204 | up_read(&gmap->mm->mmap_sem); | ||
205 | } | ||
206 | |||
183 | /* Section: vm related */ | 207 | /* Section: vm related */ |
184 | /* | 208 | /* |
185 | * Get (and clear) the dirty memory log for a memory slot. | 209 | * Get (and clear) the dirty memory log for a memory slot. |
@@ -187,7 +211,36 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
187 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | 211 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
188 | struct kvm_dirty_log *log) | 212 | struct kvm_dirty_log *log) |
189 | { | 213 | { |
190 | return 0; | 214 | int r; |
215 | unsigned long n; | ||
216 | struct kvm_memory_slot *memslot; | ||
217 | int is_dirty = 0; | ||
218 | |||
219 | mutex_lock(&kvm->slots_lock); | ||
220 | |||
221 | r = -EINVAL; | ||
222 | if (log->slot >= KVM_USER_MEM_SLOTS) | ||
223 | goto out; | ||
224 | |||
225 | memslot = id_to_memslot(kvm->memslots, log->slot); | ||
226 | r = -ENOENT; | ||
227 | if (!memslot->dirty_bitmap) | ||
228 | goto out; | ||
229 | |||
230 | kvm_s390_sync_dirty_log(kvm, memslot); | ||
231 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | ||
232 | if (r) | ||
233 | goto out; | ||
234 | |||
235 | /* Clear the dirty log */ | ||
236 | if (is_dirty) { | ||
237 | n = kvm_dirty_bitmap_bytes(memslot); | ||
238 | memset(memslot->dirty_bitmap, 0, n); | ||
239 | } | ||
240 | r = 0; | ||
241 | out: | ||
242 | mutex_unlock(&kvm->slots_lock); | ||
243 | return r; | ||
191 | } | 244 | } |
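With kvm_s390_sync_dirty_log() transferring the gmap dirty state into the memslot bitmap, KVM_GET_DIRTY_LOG finally returns real data on s390. A userspace sketch, assuming vm_fd and a slot whose page count is known to the caller:

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static unsigned long *fetch_dirty_bitmap(int vm_fd, int slot,
					 unsigned long npages)
{
	struct kvm_dirty_log log;
	size_t bytes = ((npages + 63) / 64) * 8;	/* one bit per page */
	unsigned long *bitmap = calloc(1, bytes);

	if (!bitmap)
		return NULL;
	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.dirty_bitmap = bitmap;
	/* Returns the bits accumulated since the last call and clears them. */
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;
}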
192 | 245 | ||
193 | static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | 246 | static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) |
@@ -209,11 +262,86 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
209 | return r; | 262 | return r; |
210 | } | 263 | } |
211 | 264 | ||
265 | static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) | ||
266 | { | ||
267 | int ret; | ||
268 | unsigned int idx; | ||
269 | switch (attr->attr) { | ||
270 | case KVM_S390_VM_MEM_ENABLE_CMMA: | ||
271 | ret = -EBUSY; | ||
272 | mutex_lock(&kvm->lock); | ||
273 | if (atomic_read(&kvm->online_vcpus) == 0) { | ||
274 | kvm->arch.use_cmma = 1; | ||
275 | ret = 0; | ||
276 | } | ||
277 | mutex_unlock(&kvm->lock); | ||
278 | break; | ||
279 | case KVM_S390_VM_MEM_CLR_CMMA: | ||
280 | mutex_lock(&kvm->lock); | ||
281 | idx = srcu_read_lock(&kvm->srcu); | ||
282 | page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false); | ||
283 | srcu_read_unlock(&kvm->srcu, idx); | ||
284 | mutex_unlock(&kvm->lock); | ||
285 | ret = 0; | ||
286 | break; | ||
287 | default: | ||
288 | ret = -ENXIO; | ||
289 | break; | ||
290 | } | ||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) | ||
295 | { | ||
296 | int ret; | ||
297 | |||
298 | switch (attr->group) { | ||
299 | case KVM_S390_VM_MEM_CTRL: | ||
300 | ret = kvm_s390_mem_control(kvm, attr); | ||
301 | break; | ||
302 | default: | ||
303 | ret = -ENXIO; | ||
304 | break; | ||
305 | } | ||
306 | |||
307 | return ret; | ||
308 | } | ||
309 | |||
310 | static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) | ||
311 | { | ||
312 | return -ENXIO; | ||
313 | } | ||
314 | |||
315 | static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) | ||
316 | { | ||
317 | int ret; | ||
318 | |||
319 | switch (attr->group) { | ||
320 | case KVM_S390_VM_MEM_CTRL: | ||
321 | switch (attr->attr) { | ||
322 | case KVM_S390_VM_MEM_ENABLE_CMMA: | ||
323 | case KVM_S390_VM_MEM_CLR_CMMA: | ||
324 | ret = 0; | ||
325 | break; | ||
326 | default: | ||
327 | ret = -ENXIO; | ||
328 | break; | ||
329 | } | ||
330 | break; | ||
331 | default: | ||
332 | ret = -ENXIO; | ||
333 | break; | ||
334 | } | ||
335 | |||
336 | return ret; | ||
337 | } | ||
338 | |||
212 | long kvm_arch_vm_ioctl(struct file *filp, | 339 | long kvm_arch_vm_ioctl(struct file *filp, |
213 | unsigned int ioctl, unsigned long arg) | 340 | unsigned int ioctl, unsigned long arg) |
214 | { | 341 | { |
215 | struct kvm *kvm = filp->private_data; | 342 | struct kvm *kvm = filp->private_data; |
216 | void __user *argp = (void __user *)arg; | 343 | void __user *argp = (void __user *)arg; |
344 | struct kvm_device_attr attr; | ||
217 | int r; | 345 | int r; |
218 | 346 | ||
219 | switch (ioctl) { | 347 | switch (ioctl) { |
@@ -246,6 +374,27 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
246 | } | 374 | } |
247 | break; | 375 | break; |
248 | } | 376 | } |
377 | case KVM_SET_DEVICE_ATTR: { | ||
378 | r = -EFAULT; | ||
379 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) | ||
380 | break; | ||
381 | r = kvm_s390_vm_set_attr(kvm, &attr); | ||
382 | break; | ||
383 | } | ||
384 | case KVM_GET_DEVICE_ATTR: { | ||
385 | r = -EFAULT; | ||
386 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) | ||
387 | break; | ||
388 | r = kvm_s390_vm_get_attr(kvm, &attr); | ||
389 | break; | ||
390 | } | ||
391 | case KVM_HAS_DEVICE_ATTR: { | ||
392 | r = -EFAULT; | ||
393 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) | ||
394 | break; | ||
395 | r = kvm_s390_vm_has_attr(kvm, &attr); | ||
396 | break; | ||
397 | } | ||
249 | default: | 398 | default: |
250 | r = -ENOTTY; | 399 | r = -ENOTTY; |
251 | } | 400 | } |
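These three cases expose the device-attr triple (KVM_HAS/GET/SET_DEVICE_ATTR) directly on the VM fd. Enabling CMMA from userspace is then a two-ioctl affair; a sketch using the group/attribute constants introduced by this series:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Must run before the first VCPU is created, otherwise
 * kvm_s390_mem_control() above returns -EBUSY. */
static int enable_cmma(int vm_fd)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_S390_VM_MEM_CTRL;
	attr.attr = KVM_S390_VM_MEM_ENABLE_CMMA;

	/* Probe first; older kernels lack the attribute entirely. */
	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) < 0)
		return -1;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}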
@@ -292,6 +441,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
292 | 441 | ||
293 | spin_lock_init(&kvm->arch.float_int.lock); | 442 | spin_lock_init(&kvm->arch.float_int.lock); |
294 | INIT_LIST_HEAD(&kvm->arch.float_int.list); | 443 | INIT_LIST_HEAD(&kvm->arch.float_int.list); |
444 | init_waitqueue_head(&kvm->arch.ipte_wq); | ||
295 | 445 | ||
296 | debug_register_view(kvm->arch.dbf, &debug_sprintf_view); | 446 | debug_register_view(kvm->arch.dbf, &debug_sprintf_view); |
297 | VM_EVENT(kvm, 3, "%s", "vm created"); | 447 | VM_EVENT(kvm, 3, "%s", "vm created"); |
@@ -309,6 +459,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
309 | kvm->arch.css_support = 0; | 459 | kvm->arch.css_support = 0; |
310 | kvm->arch.use_irqchip = 0; | 460 | kvm->arch.use_irqchip = 0; |
311 | 461 | ||
462 | spin_lock_init(&kvm->arch.start_stop_lock); | ||
463 | |||
312 | return 0; | 464 | return 0; |
313 | out_nogmap: | 465 | out_nogmap: |
314 | debug_unregister(kvm->arch.dbf); | 466 | debug_unregister(kvm->arch.dbf); |
@@ -322,6 +474,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
322 | { | 474 | { |
323 | VCPU_EVENT(vcpu, 3, "%s", "free cpu"); | 475 | VCPU_EVENT(vcpu, 3, "%s", "free cpu"); |
324 | trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); | 476 | trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); |
477 | kvm_s390_clear_local_irqs(vcpu); | ||
325 | kvm_clear_async_pf_completion_queue(vcpu); | 478 | kvm_clear_async_pf_completion_queue(vcpu); |
326 | if (!kvm_is_ucontrol(vcpu->kvm)) { | 479 | if (!kvm_is_ucontrol(vcpu->kvm)) { |
327 | clear_bit(63 - vcpu->vcpu_id, | 480 | clear_bit(63 - vcpu->vcpu_id, |
@@ -335,9 +488,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
335 | if (kvm_is_ucontrol(vcpu->kvm)) | 488 | if (kvm_is_ucontrol(vcpu->kvm)) |
336 | gmap_free(vcpu->arch.gmap); | 489 | gmap_free(vcpu->arch.gmap); |
337 | 490 | ||
338 | if (vcpu->arch.sie_block->cbrlo) | 491 | if (kvm_s390_cmma_enabled(vcpu->kvm)) |
339 | __free_page(__pfn_to_page( | 492 | kvm_s390_vcpu_unsetup_cmma(vcpu); |
340 | vcpu->arch.sie_block->cbrlo >> PAGE_SHIFT)); | ||
341 | free_page((unsigned long)(vcpu->arch.sie_block)); | 493 | free_page((unsigned long)(vcpu->arch.sie_block)); |
342 | 494 | ||
343 | kvm_vcpu_uninit(vcpu); | 495 | kvm_vcpu_uninit(vcpu); |
@@ -372,6 +524,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
372 | if (!kvm_is_ucontrol(kvm)) | 524 | if (!kvm_is_ucontrol(kvm)) |
373 | gmap_free(kvm->arch.gmap); | 525 | gmap_free(kvm->arch.gmap); |
374 | kvm_s390_destroy_adapters(kvm); | 526 | kvm_s390_destroy_adapters(kvm); |
527 | kvm_s390_clear_float_irqs(kvm); | ||
375 | } | 528 | } |
376 | 529 | ||
377 | /* Section: vcpu related */ | 530 | /* Section: vcpu related */ |
@@ -442,7 +595,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) | |||
442 | vcpu->arch.sie_block->pp = 0; | 595 | vcpu->arch.sie_block->pp = 0; |
443 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; | 596 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; |
444 | kvm_clear_async_pf_completion_queue(vcpu); | 597 | kvm_clear_async_pf_completion_queue(vcpu); |
445 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | 598 | kvm_s390_vcpu_stop(vcpu); |
446 | kvm_s390_clear_local_irqs(vcpu); | 599 | kvm_s390_clear_local_irqs(vcpu); |
447 | } | 600 | } |
448 | 601 | ||
@@ -451,9 +604,26 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | |||
451 | return 0; | 604 | return 0; |
452 | } | 605 | } |
453 | 606 | ||
607 | void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) | ||
608 | { | ||
609 | free_page(vcpu->arch.sie_block->cbrlo); | ||
610 | vcpu->arch.sie_block->cbrlo = 0; | ||
611 | } | ||
612 | |||
613 | int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) | ||
614 | { | ||
615 | vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); | ||
616 | if (!vcpu->arch.sie_block->cbrlo) | ||
617 | return -ENOMEM; | ||
618 | |||
619 | vcpu->arch.sie_block->ecb2 |= 0x80; | ||
620 | vcpu->arch.sie_block->ecb2 &= ~0x08; | ||
621 | return 0; | ||
622 | } | ||
623 | |||
454 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 624 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
455 | { | 625 | { |
456 | struct page *cbrl; | 626 | int rc = 0; |
457 | 627 | ||
458 | atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | | 628 | atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | |
459 | CPUSTAT_SM | | 629 | CPUSTAT_SM | |
@@ -464,15 +634,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
464 | vcpu->arch.sie_block->ecb |= 0x10; | 634 | vcpu->arch.sie_block->ecb |= 0x10; |
465 | 635 | ||
466 | vcpu->arch.sie_block->ecb2 = 8; | 636 | vcpu->arch.sie_block->ecb2 = 8; |
467 | vcpu->arch.sie_block->eca = 0xC1002001U; | 637 | vcpu->arch.sie_block->eca = 0xD1002000U; |
638 | if (sclp_has_siif()) | ||
639 | vcpu->arch.sie_block->eca |= 1; | ||
468 | vcpu->arch.sie_block->fac = (int) (long) vfacilities; | 640 | vcpu->arch.sie_block->fac = (int) (long) vfacilities; |
469 | if (kvm_enabled_cmma()) { | 641 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | |
470 | cbrl = alloc_page(GFP_KERNEL | __GFP_ZERO); | 642 | ICTL_TPROT; |
471 | if (cbrl) { | 643 | |
472 | vcpu->arch.sie_block->ecb2 |= 0x80; | 644 | if (kvm_s390_cmma_enabled(vcpu->kvm)) { |
473 | vcpu->arch.sie_block->ecb2 &= ~0x08; | 645 | rc = kvm_s390_vcpu_setup_cmma(vcpu); |
474 | vcpu->arch.sie_block->cbrlo = page_to_phys(cbrl); | 646 | if (rc) |
475 | } | 647 | return rc; |
476 | } | 648 | } |
477 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 649 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
478 | tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, | 650 | tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, |
@@ -480,7 +652,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
480 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; | 652 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; |
481 | get_cpu_id(&vcpu->arch.cpu_id); | 653 | get_cpu_id(&vcpu->arch.cpu_id); |
482 | vcpu->arch.cpu_id.version = 0xff; | 654 | vcpu->arch.cpu_id.version = 0xff; |
483 | return 0; | 655 | return rc; |
484 | } | 656 | } |
485 | 657 | ||
486 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | 658 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, |
@@ -584,7 +756,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address) | |||
584 | 756 | ||
585 | kvm_for_each_vcpu(i, vcpu, kvm) { | 757 | kvm_for_each_vcpu(i, vcpu, kvm) { |
586 | /* match against both prefix pages */ | 758 | /* match against both prefix pages */ |
587 | if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) { | 759 | if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) { |
588 | VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); | 760 | VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); |
589 | kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); | 761 | kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); |
590 | exit_sie_sync(vcpu); | 762 | exit_sie_sync(vcpu); |
@@ -769,10 +941,40 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | |||
769 | return -EINVAL; /* not implemented yet */ | 941 | return -EINVAL; /* not implemented yet */ |
770 | } | 942 | } |
771 | 943 | ||
944 | #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ | ||
945 | KVM_GUESTDBG_USE_HW_BP | \ | ||
946 | KVM_GUESTDBG_ENABLE) | ||
947 | |||
772 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 948 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
773 | struct kvm_guest_debug *dbg) | 949 | struct kvm_guest_debug *dbg) |
774 | { | 950 | { |
775 | return -EINVAL; /* not implemented yet */ | 951 | int rc = 0; |
952 | |||
953 | vcpu->guest_debug = 0; | ||
954 | kvm_s390_clear_bp_data(vcpu); | ||
955 | |||
956 | if (dbg->control & ~VALID_GUESTDBG_FLAGS) | ||
957 | return -EINVAL; | ||
958 | |||
959 | if (dbg->control & KVM_GUESTDBG_ENABLE) { | ||
960 | vcpu->guest_debug = dbg->control; | ||
961 | /* enforce guest PER */ | ||
962 | atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); | ||
963 | |||
964 | if (dbg->control & KVM_GUESTDBG_USE_HW_BP) | ||
965 | rc = kvm_s390_import_bp_data(vcpu, dbg); | ||
966 | } else { | ||
967 | atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); | ||
968 | vcpu->arch.guestdbg.last_bp = 0; | ||
969 | } | ||
970 | |||
971 | if (rc) { | ||
972 | vcpu->guest_debug = 0; | ||
973 | kvm_s390_clear_bp_data(vcpu); | ||
974 | atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); | ||
975 | } | ||
976 | |||
977 | return rc; | ||
776 | } | 978 | } |
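The matching userspace side goes through the existing KVM_SET_GUEST_DEBUG vcpu ioctl; a minimal sketch that toggles single-stepping (hardware breakpoints would additionally fill dbg.arch and set KVM_GUESTDBG_USE_HW_BP):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_singlestep(int vcpu_fd, int enable)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	if (enable)
		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	/* control == 0 takes the 'else' branch above and clears CPUSTAT_P. */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}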
777 | 979 | ||
778 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 980 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
@@ -787,8 +989,27 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |||
787 | return -EINVAL; /* not implemented yet */ | 989 | return -EINVAL; /* not implemented yet */ |
788 | } | 990 | } |
789 | 991 | ||
992 | bool kvm_s390_cmma_enabled(struct kvm *kvm) | ||
993 | { | ||
994 | if (!MACHINE_IS_LPAR) | ||
995 | return false; | ||
996 | /* only enable for z10 and later */ | ||
997 | if (!MACHINE_HAS_EDAT1) | ||
998 | return false; | ||
999 | if (!kvm->arch.use_cmma) | ||
1000 | return false; | ||
1001 | return true; | ||
1002 | } | ||
1003 | |||
1004 | static bool ibs_enabled(struct kvm_vcpu *vcpu) | ||
1005 | { | ||
1006 | return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; | ||
1007 | } | ||
1008 | |||
790 | static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) | 1009 | static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) |
791 | { | 1010 | { |
1011 | retry: | ||
1012 | s390_vcpu_unblock(vcpu); | ||
792 | /* | 1013 | /* |
793 | * We use MMU_RELOAD just to re-arm the ipte notifier for the | 1014 | * We use MMU_RELOAD just to re-arm the ipte notifier for the |
794 | * guest prefix page. gmap_ipte_notify will wait on the ptl lock. | 1015 | * guest prefix page. gmap_ipte_notify will wait on the ptl lock. |
@@ -796,27 +1017,61 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) | |||
796 | * already finished. We might race against a second unmapper that | 1017 | * already finished. We might race against a second unmapper that |
797 | * wants to set the blocking bit. Let's just retry the request loop. | 1018 | * wants to set the blocking bit. Let's just retry the request loop. |
798 | */ | 1019 | */ |
799 | while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { | 1020 | if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { |
800 | int rc; | 1021 | int rc; |
801 | rc = gmap_ipte_notify(vcpu->arch.gmap, | 1022 | rc = gmap_ipte_notify(vcpu->arch.gmap, |
802 | vcpu->arch.sie_block->prefix, | 1023 | kvm_s390_get_prefix(vcpu), |
803 | PAGE_SIZE * 2); | 1024 | PAGE_SIZE * 2); |
804 | if (rc) | 1025 | if (rc) |
805 | return rc; | 1026 | return rc; |
806 | s390_vcpu_unblock(vcpu); | 1027 | goto retry; |
1028 | } | ||
1029 | |||
1030 | if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { | ||
1031 | if (!ibs_enabled(vcpu)) { | ||
1032 | trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); | ||
1033 | atomic_set_mask(CPUSTAT_IBS, | ||
1034 | &vcpu->arch.sie_block->cpuflags); | ||
1035 | } | ||
1036 | goto retry; | ||
807 | } | 1037 | } |
1038 | |||
1039 | if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { | ||
1040 | if (ibs_enabled(vcpu)) { | ||
1041 | trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); | ||
1042 | atomic_clear_mask(CPUSTAT_IBS, | ||
1043 | &vcpu->arch.sie_block->cpuflags); | ||
1044 | } | ||
1045 | goto retry; | ||
1046 | } | ||
1047 | |||
808 | return 0; | 1048 | return 0; |
809 | } | 1049 | } |
810 | 1050 | ||
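The ENABLE/DISABLE_IBS handling leans on KVM's per-VCPU request bits: the requester sets a bit and forces the VCPU out of SIE, and the VCPU consumes the bit here before re-entering. An illustrative userspace analog of that test-and-clear protocol with C11 atomics (not the kernel implementation, which uses kvm_make_request()/kvm_check_request()):

#include <stdatomic.h>
#include <stdbool.h>

#define REQ_ENABLE_IBS	0
#define REQ_DISABLE_IBS	1

/* Requester side: set the bit, then kick the target out of its loop. */
static void make_request(atomic_ulong *reqs, int req)
{
	atomic_fetch_or(reqs, 1UL << req);
	/* ...followed by the equivalent of exit_sie_sync() */
}

/* Target side: atomically test-and-clear, like kvm_check_request(). */
static bool check_request(atomic_ulong *reqs, int req)
{
	unsigned long mask = 1UL << req;

	if (!(atomic_load(reqs) & mask))
		return false;
	return atomic_fetch_and(reqs, ~mask) & mask;
}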
811 | static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu) | 1051 | /** |
1052 | * kvm_arch_fault_in_page - fault-in guest page if necessary | ||
1053 | * @vcpu: The corresponding virtual cpu | ||
1054 | * @gpa: Guest physical address | ||
1055 | * @writable: Whether the page should be writable or not | ||
1056 | * | ||
1057 | * Make sure that a guest page has been faulted-in on the host. | ||
1058 | * | ||
1059 | * Return: Zero on success, negative error code otherwise. | ||
1060 | */ | ||
1061 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) | ||
812 | { | 1062 | { |
813 | long rc; | ||
814 | hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap); | ||
815 | struct mm_struct *mm = current->mm; | 1063 | struct mm_struct *mm = current->mm; |
1064 | hva_t hva; | ||
1065 | long rc; | ||
1066 | |||
1067 | hva = gmap_fault(gpa, vcpu->arch.gmap); | ||
1068 | if (IS_ERR_VALUE(hva)) | ||
1069 | return (long)hva; | ||
816 | down_read(&mm->mmap_sem); | 1070 | down_read(&mm->mmap_sem); |
817 | rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL); | 1071 | rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL); |
818 | up_read(&mm->mmap_sem); | 1072 | up_read(&mm->mmap_sem); |
819 | return rc; | 1073 | |
1074 | return rc < 0 ? rc : 0; | ||
820 | } | 1075 | } |
821 | 1076 | ||
822 | static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, | 1077 | static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, |
@@ -883,8 +1138,9 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) | |||
883 | if (!vcpu->arch.gmap->pfault_enabled) | 1138 | if (!vcpu->arch.gmap->pfault_enabled) |
884 | return 0; | 1139 | return 0; |
885 | 1140 | ||
886 | hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap); | 1141 | hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); |
887 | if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8)) | 1142 | hva += current->thread.gmap_addr & ~PAGE_MASK; |
1143 | if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) | ||
888 | return 0; | 1144 | return 0; |
889 | 1145 | ||
890 | rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); | 1146 | rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); |
@@ -917,6 +1173,11 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) | |||
917 | if (rc) | 1173 | if (rc) |
918 | return rc; | 1174 | return rc; |
919 | 1175 | ||
1176 | if (guestdbg_enabled(vcpu)) { | ||
1177 | kvm_s390_backup_guest_per_regs(vcpu); | ||
1178 | kvm_s390_patch_guest_per_regs(vcpu); | ||
1179 | } | ||
1180 | |||
920 | vcpu->arch.sie_block->icptcode = 0; | 1181 | vcpu->arch.sie_block->icptcode = 0; |
921 | cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); | 1182 | cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); |
922 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); | 1183 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); |
@@ -933,6 +1194,9 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | |||
933 | vcpu->arch.sie_block->icptcode); | 1194 | vcpu->arch.sie_block->icptcode); |
934 | trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); | 1195 | trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); |
935 | 1196 | ||
1197 | if (guestdbg_enabled(vcpu)) | ||
1198 | kvm_s390_restore_guest_per_regs(vcpu); | ||
1199 | |||
936 | if (exit_reason >= 0) { | 1200 | if (exit_reason >= 0) { |
937 | rc = 0; | 1201 | rc = 0; |
938 | } else if (kvm_is_ucontrol(vcpu->kvm)) { | 1202 | } else if (kvm_is_ucontrol(vcpu->kvm)) { |
@@ -945,9 +1209,12 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | |||
945 | } else if (current->thread.gmap_pfault) { | 1209 | } else if (current->thread.gmap_pfault) { |
946 | trace_kvm_s390_major_guest_pfault(vcpu); | 1210 | trace_kvm_s390_major_guest_pfault(vcpu); |
947 | current->thread.gmap_pfault = 0; | 1211 | current->thread.gmap_pfault = 0; |
948 | if (kvm_arch_setup_async_pf(vcpu) || | 1212 | if (kvm_arch_setup_async_pf(vcpu)) { |
949 | (kvm_arch_fault_in_sync(vcpu) >= 0)) | ||
950 | rc = 0; | 1213 | rc = 0; |
1214 | } else { | ||
1215 | gpa_t gpa = current->thread.gmap_addr; | ||
1216 | rc = kvm_arch_fault_in_page(vcpu, gpa, 1); | ||
1217 | } | ||
951 | } | 1218 | } |
952 | 1219 | ||
953 | if (rc == -1) { | 1220 | if (rc == -1) { |
@@ -969,16 +1236,6 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | |||
969 | return rc; | 1236 | return rc; |
970 | } | 1237 | } |
971 | 1238 | ||
972 | bool kvm_enabled_cmma(void) | ||
973 | { | ||
974 | if (!MACHINE_IS_LPAR) | ||
975 | return false; | ||
976 | /* only enable for z10 and later */ | ||
977 | if (!MACHINE_HAS_EDAT1) | ||
978 | return false; | ||
979 | return true; | ||
980 | } | ||
981 | |||
982 | static int __vcpu_run(struct kvm_vcpu *vcpu) | 1239 | static int __vcpu_run(struct kvm_vcpu *vcpu) |
983 | { | 1240 | { |
984 | int rc, exit_reason; | 1241 | int rc, exit_reason; |
@@ -1008,7 +1265,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
1008 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | 1265 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
1009 | 1266 | ||
1010 | rc = vcpu_post_run(vcpu, exit_reason); | 1267 | rc = vcpu_post_run(vcpu, exit_reason); |
1011 | } while (!signal_pending(current) && !rc); | 1268 | } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); |
1012 | 1269 | ||
1013 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | 1270 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); |
1014 | return rc; | 1271 | return rc; |
@@ -1019,10 +1276,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1019 | int rc; | 1276 | int rc; |
1020 | sigset_t sigsaved; | 1277 | sigset_t sigsaved; |
1021 | 1278 | ||
1279 | if (guestdbg_exit_pending(vcpu)) { | ||
1280 | kvm_s390_prepare_debug_exit(vcpu); | ||
1281 | return 0; | ||
1282 | } | ||
1283 | |||
1022 | if (vcpu->sigset_active) | 1284 | if (vcpu->sigset_active) |
1023 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 1285 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
1024 | 1286 | ||
1025 | atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | 1287 | kvm_s390_vcpu_start(vcpu); |
1026 | 1288 | ||
1027 | switch (kvm_run->exit_reason) { | 1289 | switch (kvm_run->exit_reason) { |
1028 | case KVM_EXIT_S390_SIEIC: | 1290 | case KVM_EXIT_S390_SIEIC: |
@@ -1031,6 +1293,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1031 | case KVM_EXIT_S390_RESET: | 1293 | case KVM_EXIT_S390_RESET: |
1032 | case KVM_EXIT_S390_UCONTROL: | 1294 | case KVM_EXIT_S390_UCONTROL: |
1033 | case KVM_EXIT_S390_TSCH: | 1295 | case KVM_EXIT_S390_TSCH: |
1296 | case KVM_EXIT_DEBUG: | ||
1034 | break; | 1297 | break; |
1035 | default: | 1298 | default: |
1036 | BUG(); | 1299 | BUG(); |
@@ -1056,6 +1319,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1056 | rc = -EINTR; | 1319 | rc = -EINTR; |
1057 | } | 1320 | } |
1058 | 1321 | ||
1322 | if (guestdbg_exit_pending(vcpu) && !rc) { | ||
1323 | kvm_s390_prepare_debug_exit(vcpu); | ||
1324 | rc = 0; | ||
1325 | } | ||
1326 | |||
1059 | if (rc == -EOPNOTSUPP) { | 1327 | if (rc == -EOPNOTSUPP) { |
1060 | /* intercept cannot be handled in-kernel, prepare kvm-run */ | 1328 | /* intercept cannot be handled in-kernel, prepare kvm-run */ |
1061 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; | 1329 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; |
@@ -1073,7 +1341,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1073 | 1341 | ||
1074 | kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; | 1342 | kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; |
1075 | kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; | 1343 | kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; |
1076 | kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix; | 1344 | kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); |
1077 | memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); | 1345 | memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); |
1078 | 1346 | ||
1079 | if (vcpu->sigset_active) | 1347 | if (vcpu->sigset_active) |
@@ -1083,83 +1351,52 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1083 | return rc; | 1351 | return rc; |
1084 | } | 1352 | } |
1085 | 1353 | ||
1086 | static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from, | ||
1087 | unsigned long n, int prefix) | ||
1088 | { | ||
1089 | if (prefix) | ||
1090 | return copy_to_guest(vcpu, guestdest, from, n); | ||
1091 | else | ||
1092 | return copy_to_guest_absolute(vcpu, guestdest, from, n); | ||
1093 | } | ||
1094 | |||
1095 | /* | 1354 | /* |
1096 | * store status at address | 1355 | * store status at address |
1097 | * we have two special cases: | 1356 | * we have two special cases: |
1098 | * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit | 1357 | * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit |
1099 | * KVM_S390_STORE_STATUS_PREFIXED: -> prefix | 1358 | * KVM_S390_STORE_STATUS_PREFIXED: -> prefix |
1100 | */ | 1359 | */ |
1101 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr) | 1360 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) |
1102 | { | 1361 | { |
1103 | unsigned char archmode = 1; | 1362 | unsigned char archmode = 1; |
1104 | int prefix; | 1363 | unsigned int px; |
1105 | u64 clkcomp; | 1364 | u64 clkcomp; |
1365 | int rc; | ||
1106 | 1366 | ||
1107 | if (addr == KVM_S390_STORE_STATUS_NOADDR) { | 1367 | if (gpa == KVM_S390_STORE_STATUS_NOADDR) { |
1108 | if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1)) | 1368 | if (write_guest_abs(vcpu, 163, &archmode, 1)) |
1109 | return -EFAULT; | 1369 | return -EFAULT; |
1110 | addr = SAVE_AREA_BASE; | 1370 | gpa = SAVE_AREA_BASE; |
1111 | prefix = 0; | 1371 | } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { |
1112 | } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) { | 1372 | if (write_guest_real(vcpu, 163, &archmode, 1)) |
1113 | if (copy_to_guest(vcpu, 163ul, &archmode, 1)) | ||
1114 | return -EFAULT; | 1373 | return -EFAULT; |
1115 | addr = SAVE_AREA_BASE; | 1374 | gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); |
1116 | prefix = 1; | 1375 | } |
1117 | } else | 1376 | rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), |
1118 | prefix = 0; | 1377 | vcpu->arch.guest_fpregs.fprs, 128); |
1119 | 1378 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), | |
1120 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), | 1379 | vcpu->run->s.regs.gprs, 128); |
1121 | vcpu->arch.guest_fpregs.fprs, 128, prefix)) | 1380 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), |
1122 | return -EFAULT; | 1381 | &vcpu->arch.sie_block->gpsw, 16); |
1123 | 1382 | px = kvm_s390_get_prefix(vcpu); | |
1124 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs), | 1383 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), |
1125 | vcpu->run->s.regs.gprs, 128, prefix)) | 1384 | &px, 4); |
1126 | return -EFAULT; | 1385 | rc |= write_guest_abs(vcpu, |
1127 | 1386 | gpa + offsetof(struct save_area, fp_ctrl_reg), | |
1128 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw), | 1387 | &vcpu->arch.guest_fpregs.fpc, 4); |
1129 | &vcpu->arch.sie_block->gpsw, 16, prefix)) | 1388 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), |
1130 | return -EFAULT; | 1389 | &vcpu->arch.sie_block->todpr, 4); |
1131 | 1390 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), | |
1132 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg), | 1391 | &vcpu->arch.sie_block->cputm, 8); |
1133 | &vcpu->arch.sie_block->prefix, 4, prefix)) | ||
1134 | return -EFAULT; | ||
1135 | |||
1136 | if (__guestcopy(vcpu, | ||
1137 | addr + offsetof(struct save_area, fp_ctrl_reg), | ||
1138 | &vcpu->arch.guest_fpregs.fpc, 4, prefix)) | ||
1139 | return -EFAULT; | ||
1140 | |||
1141 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg), | ||
1142 | &vcpu->arch.sie_block->todpr, 4, prefix)) | ||
1143 | return -EFAULT; | ||
1144 | |||
1145 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer), | ||
1146 | &vcpu->arch.sie_block->cputm, 8, prefix)) | ||
1147 | return -EFAULT; | ||
1148 | |||
1149 | clkcomp = vcpu->arch.sie_block->ckc >> 8; | 1392 | clkcomp = vcpu->arch.sie_block->ckc >> 8; |
1150 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp), | 1393 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), |
1151 | &clkcomp, 8, prefix)) | 1394 | &clkcomp, 8); |
1152 | return -EFAULT; | 1395 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), |
1153 | 1396 | &vcpu->run->s.regs.acrs, 64); | |
1154 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs), | 1397 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), |
1155 | &vcpu->run->s.regs.acrs, 64, prefix)) | 1398 | &vcpu->arch.sie_block->gcr, 128); |
1156 | return -EFAULT; | 1399 | return rc ? -EFAULT : 0; |
1157 | |||
1158 | if (__guestcopy(vcpu, | ||
1159 | addr + offsetof(struct save_area, ctrl_regs), | ||
1160 | &vcpu->arch.sie_block->gcr, 128, prefix)) | ||
1161 | return -EFAULT; | ||
1162 | return 0; | ||
1163 | } | 1400 | } |
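Besides the SIGP and reset paths, the store-status operation is reachable from userspace via the long-standing KVM_S390_STORE_STATUS vcpu ioctl. A sketch, with the magic addresses mirrored locally since they live in the non-uapi asm/kvm_host.h:

#include <sys/ioctl.h>
#include <linux/kvm.h>

#define STORE_STATUS_NOADDR	((unsigned long)-1)	/* local copy */
#define STORE_STATUS_PREFIXED	((unsigned long)-2)	/* local copy */

/* Dump the architected save area at absolute 0x1200 (64-bit mode);
 * the VCPU is assumed to be stopped. */
static int store_status_noaddr(int vcpu_fd)
{
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS, STORE_STATUS_NOADDR);
}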
1164 | 1401 | ||
1165 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | 1402 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) |
@@ -1176,6 +1413,109 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | |||
1176 | return kvm_s390_store_status_unloaded(vcpu, addr); | 1413 | return kvm_s390_store_status_unloaded(vcpu, addr); |
1177 | } | 1414 | } |
1178 | 1415 | ||
1416 | static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) | ||
1417 | { | ||
1418 | return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; | ||
1419 | } | ||
1420 | |||
1421 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) | ||
1422 | { | ||
1423 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); | ||
1424 | kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); | ||
1425 | exit_sie_sync(vcpu); | ||
1426 | } | ||
1427 | |||
1428 | static void __disable_ibs_on_all_vcpus(struct kvm *kvm) | ||
1429 | { | ||
1430 | unsigned int i; | ||
1431 | struct kvm_vcpu *vcpu; | ||
1432 | |||
1433 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
1434 | __disable_ibs_on_vcpu(vcpu); | ||
1435 | } | ||
1436 | } | ||
1437 | |||
1438 | static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) | ||
1439 | { | ||
1440 | kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); | ||
1441 | kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); | ||
1442 | exit_sie_sync(vcpu); | ||
1443 | } | ||
1444 | |||
1445 | void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) | ||
1446 | { | ||
1447 | int i, online_vcpus, started_vcpus = 0; | ||
1448 | |||
1449 | if (!is_vcpu_stopped(vcpu)) | ||
1450 | return; | ||
1451 | |||
1452 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); | ||
1453 | /* Only one cpu at a time may enter/leave the STOPPED state. */ | ||
1454 | spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); | ||
1455 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); | ||
1456 | |||
1457 | for (i = 0; i < online_vcpus; i++) { | ||
1458 | if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) | ||
1459 | started_vcpus++; | ||
1460 | } | ||
1461 | |||
1462 | if (started_vcpus == 0) { | ||
1463 | /* we're the only active VCPU -> speed it up */ | ||
1464 | __enable_ibs_on_vcpu(vcpu); | ||
1465 | } else if (started_vcpus == 1) { | ||
1466 | /* | ||
1467 | * As we are starting a second VCPU, we have to disable | ||
1468 | * the IBS facility on all VCPUs to remove potentially | ||
1469 | * oustanding ENABLE requests. | ||
1470 | */ | ||
1471 | __disable_ibs_on_all_vcpus(vcpu->kvm); | ||
1472 | } | ||
1473 | |||
1474 | atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | ||
1475 | /* | ||
1476 | * Another VCPU might have used IBS while we were offline. | ||
1477 | * Let's play safe and flush the VCPU at startup. | ||
1478 | */ | ||
1479 | vcpu->arch.sie_block->ihcpu = 0xffff; | ||
1480 | spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); | ||
1481 | return; | ||
1482 | } | ||
1483 | |||
1484 | void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) | ||
1485 | { | ||
1486 | int i, online_vcpus, started_vcpus = 0; | ||
1487 | struct kvm_vcpu *started_vcpu = NULL; | ||
1488 | |||
1489 | if (is_vcpu_stopped(vcpu)) | ||
1490 | return; | ||
1491 | |||
1492 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); | ||
1493 | /* Only one cpu at a time may enter/leave the STOPPED state. */ | ||
1494 | spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); | ||
1495 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); | ||
1496 | |||
1497 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | ||
1498 | __disable_ibs_on_vcpu(vcpu); | ||
1499 | |||
1500 | for (i = 0; i < online_vcpus; i++) { | ||
1501 | if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { | ||
1502 | started_vcpus++; | ||
1503 | started_vcpu = vcpu->kvm->vcpus[i]; | ||
1504 | } | ||
1505 | } | ||
1506 | |||
1507 | if (started_vcpus == 1) { | ||
1508 | /* | ||
1509 | * As we only have one VCPU left, we want to enable the | ||
1510 | * IBS facility for that VCPU to speed it up. | ||
1511 | */ | ||
1512 | __enable_ibs_on_vcpu(started_vcpu); | ||
1513 | } | ||
1514 | |||
1515 | spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); | ||
1516 | return; | ||
1517 | } | ||
1518 | |||
1179 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | 1519 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
1180 | struct kvm_enable_cap *cap) | 1520 | struct kvm_enable_cap *cap) |
1181 | { | 1521 | { |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 3c1e2274d9ea..a8655ed31616 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -28,7 +28,6 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); | |||
28 | 28 | ||
29 | /* Transactional Memory Execution related macros */ | 29 | /* Transactional Memory Execution related macros */ |
30 | #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) | 30 | #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) |
31 | #define TDB_ADDR 0x1800UL | ||
32 | #define TDB_FORMAT1 1 | 31 | #define TDB_FORMAT1 1 |
33 | #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) | 32 | #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) |
34 | 33 | ||
@@ -62,9 +61,15 @@ static inline int kvm_is_ucontrol(struct kvm *kvm) | |||
62 | #endif | 61 | #endif |
63 | } | 62 | } |
64 | 63 | ||
64 | #define GUEST_PREFIX_SHIFT 13 | ||
65 | static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu) | ||
66 | { | ||
67 | return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT; | ||
68 | } | ||
69 | |||
65 | static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) | 70 | static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) |
66 | { | 71 | { |
67 | vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u; | 72 | vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT; |
68 | vcpu->arch.sie_block->ihcpu = 0xffff; | 73 | vcpu->arch.sie_block->ihcpu = 0xffff; |
69 | kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); | 74 | kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); |
70 | } | 75 | } |
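Since prefixes are 8K-aligned, the SIE block can hold the prefix as an 18-bit field shifted right by 13; this pair of helpers hides the conversion from all callers. A tiny round-trip check:

#include <assert.h>
#include <stdint.h>

#define GUEST_PREFIX_SHIFT 13

int main(void)
{
	uint32_t prefix = 0x7fffe000u;		/* any 8K-aligned prefix */
	uint32_t field = prefix >> GUEST_PREFIX_SHIFT;	/* as stored in SIE */

	/* Reading it back recreates the full address, as
	 * kvm_s390_get_prefix() does. */
	assert((uint32_t)(field << GUEST_PREFIX_SHIFT) == prefix);
	return 0;
}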
@@ -130,6 +135,7 @@ void kvm_s390_tasklet(unsigned long parm); | |||
130 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); | 135 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); |
131 | void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); | 136 | void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); |
132 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); | 137 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); |
138 | void kvm_s390_clear_float_irqs(struct kvm *kvm); | ||
133 | int __must_check kvm_s390_inject_vm(struct kvm *kvm, | 139 | int __must_check kvm_s390_inject_vm(struct kvm *kvm, |
134 | struct kvm_s390_interrupt *s390int); | 140 | struct kvm_s390_interrupt *s390int); |
135 | int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | 141 | int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, |
@@ -137,35 +143,94 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
137 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); | 143 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
138 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 144 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
139 | u64 cr6, u64 schid); | 145 | u64 cr6, u64 schid); |
146 | void kvm_s390_reinject_io_int(struct kvm *kvm, | ||
147 | struct kvm_s390_interrupt_info *inti); | ||
140 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); | 148 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); |
141 | 149 | ||
142 | /* implemented in priv.c */ | 150 | /* implemented in priv.c */ |
151 | int is_valid_psw(psw_t *psw); | ||
143 | int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); | 152 | int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); |
144 | int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); | 153 | int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); |
145 | int kvm_s390_handle_01(struct kvm_vcpu *vcpu); | 154 | int kvm_s390_handle_01(struct kvm_vcpu *vcpu); |
146 | int kvm_s390_handle_b9(struct kvm_vcpu *vcpu); | 155 | int kvm_s390_handle_b9(struct kvm_vcpu *vcpu); |
147 | int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu); | 156 | int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu); |
157 | int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu); | ||
148 | int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu); | 158 | int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu); |
149 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu); | 159 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu); |
150 | 160 | ||
151 | /* implemented in sigp.c */ | 161 | /* implemented in sigp.c */ |
152 | int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); | 162 | int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); |
163 | int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); | ||
153 | 164 | ||
154 | /* implemented in kvm-s390.c */ | 165 | /* implemented in kvm-s390.c */ |
166 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); | ||
155 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); | 167 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); |
156 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); | 168 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); |
169 | void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); | ||
170 | void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); | ||
157 | void s390_vcpu_block(struct kvm_vcpu *vcpu); | 171 | void s390_vcpu_block(struct kvm_vcpu *vcpu); |
158 | void s390_vcpu_unblock(struct kvm_vcpu *vcpu); | 172 | void s390_vcpu_unblock(struct kvm_vcpu *vcpu); |
159 | void exit_sie(struct kvm_vcpu *vcpu); | 173 | void exit_sie(struct kvm_vcpu *vcpu); |
160 | void exit_sie_sync(struct kvm_vcpu *vcpu); | 174 | void exit_sie_sync(struct kvm_vcpu *vcpu); |
161 | /* are we going to support cmma? */ | 175 | int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); |
162 | bool kvm_enabled_cmma(void); | 176 | void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); |
177 | /* is cmma enabled */ | ||
178 | bool kvm_s390_cmma_enabled(struct kvm *kvm); | ||
179 | int test_vfacility(unsigned long nr); | ||
180 | |||
163 | /* implemented in diag.c */ | 181 | /* implemented in diag.c */ |
164 | int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); | 182 | int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); |
183 | /* implemented in interrupt.c */ | ||
184 | int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, | ||
185 | struct kvm_s390_pgm_info *pgm_info); | ||
186 | |||
187 | /** | ||
188 | * kvm_s390_inject_prog_cond - conditionally inject a program check | ||
189 | * @vcpu: virtual cpu | ||
190 | * @rc: original return/error code | ||
191 | * | ||
192 | * This function is supposed to be used after regular guest access functions | ||
193 | * failed, to conditionally inject a program check to a vcpu. The typical | ||
194 | * pattern would look like | ||
195 | * | ||
196 | * rc = write_guest(vcpu, addr, data, len); | ||
197 | * if (rc) | ||
198 | * return kvm_s390_inject_prog_cond(vcpu, rc); | ||
199 | * | ||
200 | * A negative return code from guest access functions implies an internal error | ||
201 | * like e.g. out of memory. In these cases no program check should be injected | ||
202 | * to the guest. | ||
203 | * A positive value implies that an exception happened while accessing a guest's | ||
204 | * memory. In this case all data belonging to the corresponding program check | ||
205 | * has been stored in vcpu->arch.pgm and can be injected with | ||
206 | * kvm_s390_inject_prog_irq(). | ||
207 | * | ||
208 | * Returns: - the original @rc value if @rc was negative (internal error) | ||
209 | * - zero if @rc was already zero | ||
210 | * - zero or error code from injecting if @rc was positive | ||
211 | * (program check injected to @vcpu) | ||
212 | */ | ||
213 | static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc) | ||
214 | { | ||
215 | if (rc <= 0) | ||
216 | return rc; | ||
217 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | ||
218 | } | ||
165 | 219 | ||
166 | /* implemented in interrupt.c */ | 220 | /* implemented in interrupt.c */ |
167 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | 221 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); |
168 | int psw_extint_disabled(struct kvm_vcpu *vcpu); | 222 | int psw_extint_disabled(struct kvm_vcpu *vcpu); |
169 | void kvm_s390_destroy_adapters(struct kvm *kvm); | 223 | void kvm_s390_destroy_adapters(struct kvm *kvm); |
224 | int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu); | ||
225 | |||
226 | /* implemented in guestdbg.c */ | ||
227 | void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); | ||
228 | void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu); | ||
229 | void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu); | ||
230 | int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu, | ||
231 | struct kvm_guest_debug *dbg); | ||
232 | void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu); | ||
233 | void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu); | ||
234 | void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu); | ||
170 | 235 | ||
171 | #endif | 236 | #endif |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 476e9e218f43..f89c1cd67751 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -35,8 +35,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
35 | { | 35 | { |
36 | struct kvm_vcpu *cpup; | 36 | struct kvm_vcpu *cpup; |
37 | s64 hostclk, val; | 37 | s64 hostclk, val; |
38 | int i, rc; | ||
38 | u64 op2; | 39 | u64 op2; |
39 | int i; | ||
40 | 40 | ||
41 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 41 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
42 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 42 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
@@ -44,8 +44,9 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
44 | op2 = kvm_s390_get_base_disp_s(vcpu); | 44 | op2 = kvm_s390_get_base_disp_s(vcpu); |
45 | if (op2 & 7) /* Operand must be on a doubleword boundary */ | 45 | if (op2 & 7) /* Operand must be on a doubleword boundary */ |
46 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 46 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
47 | if (get_guest(vcpu, val, (u64 __user *) op2)) | 47 | rc = read_guest(vcpu, op2, &val, sizeof(val)); |
48 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 48 | if (rc) |
49 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
49 | 50 | ||
50 | if (store_tod_clock(&hostclk)) { | 51 | if (store_tod_clock(&hostclk)) { |
51 | kvm_s390_set_psw_cc(vcpu, 3); | 52 | kvm_s390_set_psw_cc(vcpu, 3); |
@@ -65,8 +66,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
65 | static int handle_set_prefix(struct kvm_vcpu *vcpu) | 66 | static int handle_set_prefix(struct kvm_vcpu *vcpu) |
66 | { | 67 | { |
67 | u64 operand2; | 68 | u64 operand2; |
68 | u32 address = 0; | 69 | u32 address; |
69 | u8 tmp; | 70 | int rc; |
70 | 71 | ||
71 | vcpu->stat.instruction_spx++; | 72 | vcpu->stat.instruction_spx++; |
72 | 73 | ||
@@ -80,14 +81,18 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) | |||
80 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 81 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
81 | 82 | ||
82 | /* get the value */ | 83 | /* get the value */ |
83 | if (get_guest(vcpu, address, (u32 __user *) operand2)) | 84 | rc = read_guest(vcpu, operand2, &address, sizeof(address)); |
84 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 85 | if (rc) |
86 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
85 | 87 | ||
86 | address = address & 0x7fffe000u; | 88 | address &= 0x7fffe000u; |
87 | 89 | ||
88 | /* make sure that the new value is valid memory */ | 90 | /* |
89 | if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || | 91 | * Make sure the new value is valid memory. We only need to check the |
90 | (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) | 92 | * first page, since address is 8k aligned and memory pieces are always |
93 | * at least 1MB aligned and have at least a size of 1MB. | ||
94 | */ | ||
95 | if (kvm_is_error_gpa(vcpu->kvm, address)) | ||
91 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 96 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
92 | 97 | ||
93 | kvm_s390_set_prefix(vcpu, address); | 98 | kvm_s390_set_prefix(vcpu, address); |
@@ -101,6 +106,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
101 | { | 106 | { |
102 | u64 operand2; | 107 | u64 operand2; |
103 | u32 address; | 108 | u32 address; |
109 | int rc; | ||
104 | 110 | ||
105 | vcpu->stat.instruction_stpx++; | 111 | vcpu->stat.instruction_stpx++; |
106 | 112 | ||
@@ -113,12 +119,12 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
113 | if (operand2 & 3) | 119 | if (operand2 & 3) |
114 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 120 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
115 | 121 | ||
116 | address = vcpu->arch.sie_block->prefix; | 122 | address = kvm_s390_get_prefix(vcpu); |
117 | address = address & 0x7fffe000u; | ||
118 | 123 | ||
119 | /* get the value */ | 124 | /* get the value */ |
120 | if (put_guest(vcpu, address, (u32 __user *)operand2)) | 125 | rc = write_guest(vcpu, operand2, &address, sizeof(address)); |
121 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 126 | if (rc) |
127 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
122 | 128 | ||
123 | VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); | 129 | VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); |
124 | trace_kvm_s390_handle_prefix(vcpu, 0, address); | 130 | trace_kvm_s390_handle_prefix(vcpu, 0, address); |
@@ -127,28 +133,44 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
127 | 133 | ||
128 | static int handle_store_cpu_address(struct kvm_vcpu *vcpu) | 134 | static int handle_store_cpu_address(struct kvm_vcpu *vcpu) |
129 | { | 135 | { |
130 | u64 useraddr; | 136 | u16 vcpu_id = vcpu->vcpu_id; |
137 | u64 ga; | ||
138 | int rc; | ||
131 | 139 | ||
132 | vcpu->stat.instruction_stap++; | 140 | vcpu->stat.instruction_stap++; |
133 | 141 | ||
134 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 142 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
135 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 143 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
136 | 144 | ||
137 | useraddr = kvm_s390_get_base_disp_s(vcpu); | 145 | ga = kvm_s390_get_base_disp_s(vcpu); |
138 | 146 | ||
139 | if (useraddr & 1) | 147 | if (ga & 1) |
140 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 148 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
141 | 149 | ||
142 | if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr)) | 150 | rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); |
143 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 151 | if (rc) |
152 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
144 | 153 | ||
145 | VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); | 154 | VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); |
146 | trace_kvm_s390_handle_stap(vcpu, useraddr); | 155 | trace_kvm_s390_handle_stap(vcpu, ga); |
147 | return 0; | 156 | return 0; |
148 | } | 157 | } |
149 | 158 | ||
159 | static void __skey_check_enable(struct kvm_vcpu *vcpu) | ||
160 | { | ||
161 | if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) | ||
162 | return; | ||
163 | |||
164 | s390_enable_skey(); | ||
165 | trace_kvm_s390_skey_related_inst(vcpu); | ||
166 | vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); | ||
167 | } | ||
168 | |||
169 | |||
150 | static int handle_skey(struct kvm_vcpu *vcpu) | 170 | static int handle_skey(struct kvm_vcpu *vcpu) |
151 | { | 171 | { |
172 | __skey_check_enable(vcpu); | ||
173 | |||
152 | vcpu->stat.instruction_storage_key++; | 174 | vcpu->stat.instruction_storage_key++; |
153 | 175 | ||
154 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 176 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
@@ -160,9 +182,21 @@ static int handle_skey(struct kvm_vcpu *vcpu) | |||
160 | return 0; | 182 | return 0; |
161 | } | 183 | } |
162 | 184 | ||
185 | static int handle_ipte_interlock(struct kvm_vcpu *vcpu) | ||
186 | { | ||
187 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
188 | |||
189 | vcpu->stat.instruction_ipte_interlock++; | ||
190 | if (psw_bits(*psw).p) | ||
191 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | ||
192 | wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); | ||
193 | psw->addr = __rewind_psw(*psw, 4); | ||
194 | VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
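handle_ipte_interlock() parks the VCPU on ipte_wq and then rewinds the PSW by the 4-byte instruction length, so the guest transparently retries the instruction once the lock drops. A hedged sketch of what such a rewind amounts to, wrapping within the current addressing mode as __rewind_psw() does:

#include <stdint.h>

static uint64_t rewind_addr(uint64_t addr, int ilen, int addr_bits)
{
	/* addr_bits is 24, 31 or 64 depending on the PSW addressing mode. */
	uint64_t mask = (addr_bits == 64) ? ~0ULL
					  : (1ULL << addr_bits) - 1;

	return (addr - ilen) & mask;
}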
163 | static int handle_test_block(struct kvm_vcpu *vcpu) | 198 | static int handle_test_block(struct kvm_vcpu *vcpu) |
164 | { | 199 | { |
165 | unsigned long hva; | ||
166 | gpa_t addr; | 200 | gpa_t addr; |
167 | int reg2; | 201 | int reg2; |
168 | 202 | ||
@@ -171,16 +205,18 @@ static int handle_test_block(struct kvm_vcpu *vcpu) | |||
171 | 205 | ||
172 | kvm_s390_get_regs_rre(vcpu, NULL, ®2); | 206 | kvm_s390_get_regs_rre(vcpu, NULL, ®2); |
173 | addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; | 207 | addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; |
208 | addr = kvm_s390_logical_to_effective(vcpu, addr); | ||
209 | if (kvm_s390_check_low_addr_protection(vcpu, addr)) | ||
210 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | ||
174 | addr = kvm_s390_real_to_abs(vcpu, addr); | 211 | addr = kvm_s390_real_to_abs(vcpu, addr); |
175 | 212 | ||
176 | hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr)); | 213 | if (kvm_is_error_gpa(vcpu->kvm, addr)) |
177 | if (kvm_is_error_hva(hva)) | ||
178 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 214 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
179 | /* | 215 | /* |
180 | * We don't expect errors on modern systems, and do not care | 216 | * We don't expect errors on modern systems, and do not care |
181 | * about storage keys (yet), so let's just clear the page. | 217 | * about storage keys (yet), so let's just clear the page. |
182 | */ | 218 | */ |
183 | if (clear_user((void __user *)hva, PAGE_SIZE) != 0) | 219 | if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) |
184 | return -EFAULT; | 220 | return -EFAULT; |
185 | kvm_s390_set_psw_cc(vcpu, 0); | 221 | kvm_s390_set_psw_cc(vcpu, 0); |
186 | vcpu->run->s.regs.gprs[0] = 0; | 222 | vcpu->run->s.regs.gprs[0] = 0; |
@@ -190,9 +226,12 @@ static int handle_test_block(struct kvm_vcpu *vcpu) | |||
190 | static int handle_tpi(struct kvm_vcpu *vcpu) | 226 | static int handle_tpi(struct kvm_vcpu *vcpu) |
191 | { | 227 | { |
192 | struct kvm_s390_interrupt_info *inti; | 228 | struct kvm_s390_interrupt_info *inti; |
229 | unsigned long len; | ||
230 | u32 tpi_data[3]; | ||
231 | int cc, rc; | ||
193 | u64 addr; | 232 | u64 addr; |
194 | int cc; | ||
195 | 233 | ||
234 | rc = 0; | ||
196 | addr = kvm_s390_get_base_disp_s(vcpu); | 235 | addr = kvm_s390_get_base_disp_s(vcpu); |
197 | if (addr & 3) | 236 | if (addr & 3) |
198 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 237 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -201,30 +240,41 @@ static int handle_tpi(struct kvm_vcpu *vcpu) | |||
201 | if (!inti) | 240 | if (!inti) |
202 | goto no_interrupt; | 241 | goto no_interrupt; |
203 | cc = 1; | 242 | cc = 1; |
243 | tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; | ||
244 | tpi_data[1] = inti->io.io_int_parm; | ||
245 | tpi_data[2] = inti->io.io_int_word; | ||
204 | if (addr) { | 246 | if (addr) { |
205 | /* | 247 | /* |
206 | * Store the two-word I/O interruption code into the | 248 | * Store the two-word I/O interruption code into the |
207 | * provided area. | 249 | * provided area. |
208 | */ | 250 | */ |
209 | if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr) | 251 | len = sizeof(tpi_data) - 4; |
210 | || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2)) | 252 | rc = write_guest(vcpu, addr, &tpi_data, len); |
211 | || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4))) | 253 | if (rc) |
212 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 254 | return kvm_s390_inject_prog_cond(vcpu, rc); |
213 | } else { | 255 | } else { |
214 | /* | 256 | /* |
215 | * Store the three-word I/O interruption code into | 257 | * Store the three-word I/O interruption code into |
216 | * the appropriate lowcore area. | 258 | * the appropriate lowcore area. |
217 | */ | 259 | */ |
218 | put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID); | 260 | len = sizeof(tpi_data); |
219 | put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR); | 261 | if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) |
220 | put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM); | 262 | rc = -EFAULT; |
221 | put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD); | ||
222 | } | 263 | } |
223 | kfree(inti); | 264 | /* |
265 | * If we encounter a problem storing the interruption code, the | ||
266 | * instruction is suppressed from the guest's view: reinject the | ||
267 | * interrupt. | ||
268 | */ | ||
269 | if (!rc) | ||
270 | kfree(inti); | ||
271 | else | ||
272 | kvm_s390_reinject_io_int(vcpu->kvm, inti); | ||
224 | no_interrupt: | 273 | no_interrupt: |
225 | /* Set condition code and we're done. */ | 274 | /* Set condition code and we're done. */ |
226 | kvm_s390_set_psw_cc(vcpu, cc); | 275 | if (!rc) |
227 | return 0; | 276 | kvm_s390_set_psw_cc(vcpu, cc); |
277 | return rc ? -EFAULT : 0; | ||
228 | } | 278 | } |
229 | 279 | ||
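The TPI rework builds the full three-word interruption code in tpi_data[] once, then stores either the first two words (8 bytes, when an operand address is given) or all three words (12 bytes, into the lowcore). A small sketch of the layout arithmetic, with made-up field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tpi_data[3];
	uint16_t subchannel_id = 0x0001, subchannel_nr = 0x2345;

	tpi_data[0] = (uint32_t)subchannel_id << 16 | subchannel_nr;
	tpi_data[1] = 0xdeadbeef;	/* io_int_parm */
	tpi_data[2] = 0xc0ffee00;	/* io_int_word */

	printf("word0 = %#x\n", tpi_data[0]);
	printf("two-word store: %zu bytes\n", sizeof(tpi_data) - 4);
	printf("three-word store: %zu bytes\n", sizeof(tpi_data));
	return 0;
}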
230 | static int handle_tsch(struct kvm_vcpu *vcpu) | 280 | static int handle_tsch(struct kvm_vcpu *vcpu) |
@@ -292,10 +342,10 @@ static int handle_stfl(struct kvm_vcpu *vcpu) | |||
292 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 342 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
293 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 343 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
294 | 344 | ||
295 | rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), | 345 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), |
296 | vfacilities, 4); | 346 | vfacilities, 4); |
297 | if (rc) | 347 | if (rc) |
298 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 348 | return rc; |
299 | VCPU_EVENT(vcpu, 5, "store facility list value %x", | 349 | VCPU_EVENT(vcpu, 5, "store facility list value %x", |
300 | *(unsigned int *) vfacilities); | 350 | *(unsigned int *) vfacilities); |
301 | trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); | 351 | trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); |
@@ -314,7 +364,8 @@ static void handle_new_psw(struct kvm_vcpu *vcpu) | |||
314 | #define PSW_ADDR_24 0x0000000000ffffffUL | 364 | #define PSW_ADDR_24 0x0000000000ffffffUL |
315 | #define PSW_ADDR_31 0x000000007fffffffUL | 365 | #define PSW_ADDR_31 0x000000007fffffffUL |
316 | 366 | ||
317 | static int is_valid_psw(psw_t *psw) { | 367 | int is_valid_psw(psw_t *psw) |
368 | { | ||
318 | if (psw->mask & PSW_MASK_UNASSIGNED) | 369 | if (psw->mask & PSW_MASK_UNASSIGNED) |
319 | return 0; | 370 | return 0; |
320 | if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) { | 371 | if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) { |
@@ -325,6 +376,8 @@ static int is_valid_psw(psw_t *psw) { | |||
325 | return 0; | 376 | return 0; |
326 | if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA) | 377 | if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA) |
327 | return 0; | 378 | return 0; |
379 | if (psw->addr & 1) | ||
380 | return 0; | ||
328 | return 1; | 381 | return 1; |
329 | } | 382 | } |
330 | 383 | ||
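The newly added check rejects odd PSW addresses: s390 instructions are halfword aligned, so the low bit of a valid instruction address is always zero. A trivial illustration:

#include <assert.h>

/* Instructions are 2-byte aligned; an odd PSW address can never be valid. */
static int addr_is_halfword_aligned(unsigned long addr)
{
	return !(addr & 1);
}

int main(void)
{
	assert(addr_is_halfword_aligned(0x2000));
	assert(!addr_is_halfword_aligned(0x2001));
	return 0;
}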
@@ -333,6 +386,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) | |||
333 | psw_t *gpsw = &vcpu->arch.sie_block->gpsw; | 386 | psw_t *gpsw = &vcpu->arch.sie_block->gpsw; |
334 | psw_compat_t new_psw; | 387 | psw_compat_t new_psw; |
335 | u64 addr; | 388 | u64 addr; |
389 | int rc; | ||
336 | 390 | ||
337 | if (gpsw->mask & PSW_MASK_PSTATE) | 391 | if (gpsw->mask & PSW_MASK_PSTATE) |
338 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 392 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
@@ -340,8 +394,10 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) | |||
340 | addr = kvm_s390_get_base_disp_s(vcpu); | 394 | addr = kvm_s390_get_base_disp_s(vcpu); |
341 | if (addr & 7) | 395 | if (addr & 7) |
342 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 396 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
343 | if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) | 397 | |
344 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 398 | rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); |
399 | if (rc) | ||
400 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
345 | if (!(new_psw.mask & PSW32_MASK_BASE)) | 401 | if (!(new_psw.mask & PSW32_MASK_BASE)) |
346 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 402 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
347 | gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; | 403 | gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; |
@@ -357,6 +413,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) | |||
357 | { | 413 | { |
358 | psw_t new_psw; | 414 | psw_t new_psw; |
359 | u64 addr; | 415 | u64 addr; |
416 | int rc; | ||
360 | 417 | ||
361 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 418 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
362 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 419 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
@@ -364,8 +421,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) | |||
364 | addr = kvm_s390_get_base_disp_s(vcpu); | 421 | addr = kvm_s390_get_base_disp_s(vcpu); |
365 | if (addr & 7) | 422 | if (addr & 7) |
366 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 423 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
367 | if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) | 424 | rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); |
368 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 425 | if (rc) |
426 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
369 | vcpu->arch.sie_block->gpsw = new_psw; | 427 | vcpu->arch.sie_block->gpsw = new_psw; |
370 | if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) | 428 | if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) |
371 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 429 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -375,7 +433,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) | |||
375 | 433 | ||
376 | static int handle_stidp(struct kvm_vcpu *vcpu) | 434 | static int handle_stidp(struct kvm_vcpu *vcpu) |
377 | { | 435 | { |
436 | u64 stidp_data = vcpu->arch.stidp_data; | ||
378 | u64 operand2; | 437 | u64 operand2; |
438 | int rc; | ||
379 | 439 | ||
380 | vcpu->stat.instruction_stidp++; | 440 | vcpu->stat.instruction_stidp++; |
381 | 441 | ||
@@ -387,8 +447,9 @@ static int handle_stidp(struct kvm_vcpu *vcpu) | |||
387 | if (operand2 & 7) | 447 | if (operand2 & 7) |
388 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 448 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
389 | 449 | ||
390 | if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2)) | 450 | rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); |
391 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 451 | if (rc) |
452 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
392 | 453 | ||
393 | VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); | 454 | VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); |
394 | return 0; | 455 | return 0; |
@@ -474,9 +535,10 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
474 | break; | 535 | break; |
475 | } | 536 | } |
476 | 537 | ||
477 | if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { | 538 | rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); |
478 | rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 539 | if (rc) { |
479 | goto out_exception; | 540 | rc = kvm_s390_inject_prog_cond(vcpu, rc); |
541 | goto out; | ||
480 | } | 542 | } |
481 | trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); | 543 | trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); |
482 | free_page(mem); | 544 | free_page(mem); |
@@ -485,7 +547,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
485 | return 0; | 547 | return 0; |
486 | out_no_data: | 548 | out_no_data: |
487 | kvm_s390_set_psw_cc(vcpu, 3); | 549 | kvm_s390_set_psw_cc(vcpu, 3); |
488 | out_exception: | 550 | out: |
489 | free_page(mem); | 551 | free_page(mem); |
490 | return rc; | 552 | return rc; |
491 | } | 553 | } |
@@ -496,6 +558,7 @@ static const intercept_handler_t b2_handlers[256] = { | |||
496 | [0x10] = handle_set_prefix, | 558 | [0x10] = handle_set_prefix, |
497 | [0x11] = handle_store_prefix, | 559 | [0x11] = handle_store_prefix, |
498 | [0x12] = handle_store_cpu_address, | 560 | [0x12] = handle_store_cpu_address, |
561 | [0x21] = handle_ipte_interlock, | ||
499 | [0x29] = handle_skey, | 562 | [0x29] = handle_skey, |
500 | [0x2a] = handle_skey, | 563 | [0x2a] = handle_skey, |
501 | [0x2b] = handle_skey, | 564 | [0x2b] = handle_skey, |
@@ -513,6 +576,7 @@ static const intercept_handler_t b2_handlers[256] = { | |||
513 | [0x3a] = handle_io_inst, | 576 | [0x3a] = handle_io_inst, |
514 | [0x3b] = handle_io_inst, | 577 | [0x3b] = handle_io_inst, |
515 | [0x3c] = handle_io_inst, | 578 | [0x3c] = handle_io_inst, |
579 | [0x50] = handle_ipte_interlock, | ||
516 | [0x5f] = handle_io_inst, | 580 | [0x5f] = handle_io_inst, |
517 | [0x74] = handle_io_inst, | 581 | [0x74] = handle_io_inst, |
518 | [0x76] = handle_io_inst, | 582 | [0x76] = handle_io_inst, |
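The b2_handlers table dispatches on the second opcode byte; slots left out of the designated initializer stay NULL, and a NULL handler means the intercept is passed to userspace. A generic sketch of this table-driven dispatch (handler and error values hypothetical):

#include <stdio.h>

struct vcpu_sketch;			/* opaque in this sketch */
typedef int (*handler_t)(struct vcpu_sketch *);

static int handle_example(struct vcpu_sketch *v) { (void)v; return 0; }

/* Sparse table: designated initializers leave the other 255 slots NULL. */
static const handler_t handlers[256] = {
	[0x21] = handle_example,
};

static int dispatch(struct vcpu_sketch *v, unsigned int opcode_low)
{
	handler_t h = handlers[opcode_low & 0xff];

	return h ? h(v) : -95;		/* -EOPNOTSUPP: defer to userspace */
}

int main(void)
{
	printf("%d %d\n", dispatch(NULL, 0x21), dispatch(NULL, 0x22));
	return 0;
}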
@@ -591,6 +655,11 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
591 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 655 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
592 | 656 | ||
593 | start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; | 657 | start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; |
658 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { | ||
659 | if (kvm_s390_check_low_addr_protection(vcpu, start)) | ||
660 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | ||
661 | } | ||
662 | |||
594 | switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { | 663 | switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { |
595 | case 0x00000000: | 664 | case 0x00000000: |
596 | end = (start + (1UL << 12)) & ~((1UL << 12) - 1); | 665 | end = (start + (1UL << 12)) & ~((1UL << 12) - 1); |
@@ -606,10 +675,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
606 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 675 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
607 | } | 676 | } |
608 | while (start < end) { | 677 | while (start < end) { |
609 | unsigned long useraddr; | 678 | unsigned long useraddr, abs_addr; |
610 | 679 | ||
611 | useraddr = gmap_translate(start, vcpu->arch.gmap); | 680 | /* Translate guest address to host address */ |
612 | if (IS_ERR((void *)useraddr)) | 681 | if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0) |
682 | abs_addr = kvm_s390_real_to_abs(vcpu, start); | ||
683 | else | ||
684 | abs_addr = start; | ||
685 | useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr)); | ||
686 | if (kvm_is_error_hva(useraddr)) | ||
613 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 687 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
614 | 688 | ||
615 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { | 689 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { |
@@ -618,6 +692,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
618 | } | 692 | } |
619 | 693 | ||
620 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { | 694 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { |
695 | __skey_check_enable(vcpu); | ||
621 | if (set_guest_storage_key(current->mm, useraddr, | 696 | if (set_guest_storage_key(current->mm, useraddr, |
622 | vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, | 697 | vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, |
623 | vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) | 698 | vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) |
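PFMF's frame-size code (FSC) selects how far the loop above runs: for 4K frames the end is the next 4K boundary, for 1M frames the next 1M boundary. A worked example of the rounding arithmetic used in the handler:

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x12345000UL;	/* already PAGE_MASK-aligned */

	/* FSC == 0: operate on a single 4K page. */
	unsigned long end_4k = (start + (1UL << 12)) & ~((1UL << 12) - 1);
	/* 1M frames (assumed FSC encoding): run to the next 1M boundary. */
	unsigned long end_1m = (start + (1UL << 20)) & ~((1UL << 20) - 1);

	printf("4K: %#lx..%#lx\n", start, end_4k);	/* ..0x12346000 */
	printf("1M: %#lx..%#lx\n", start, end_1m);	/* ..0x12400000 */
	return 0;
}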
@@ -642,7 +717,7 @@ static int handle_essa(struct kvm_vcpu *vcpu) | |||
642 | VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); | 717 | VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); |
643 | gmap = vcpu->arch.gmap; | 718 | gmap = vcpu->arch.gmap; |
644 | vcpu->stat.instruction_essa++; | 719 | vcpu->stat.instruction_essa++; |
645 | if (!kvm_enabled_cmma() || !vcpu->arch.sie_block->cbrlo) | 720 | if (!kvm_s390_cmma_enabled(vcpu->kvm)) |
646 | return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); | 721 | return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); |
647 | 722 | ||
648 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 723 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
@@ -672,7 +747,10 @@ static int handle_essa(struct kvm_vcpu *vcpu) | |||
672 | } | 747 | } |
673 | 748 | ||
674 | static const intercept_handler_t b9_handlers[256] = { | 749 | static const intercept_handler_t b9_handlers[256] = { |
750 | [0x8a] = handle_ipte_interlock, | ||
675 | [0x8d] = handle_epsw, | 751 | [0x8d] = handle_epsw, |
752 | [0x8e] = handle_ipte_interlock, | ||
753 | [0x8f] = handle_ipte_interlock, | ||
676 | [0xab] = handle_essa, | 754 | [0xab] = handle_essa, |
677 | [0xaf] = handle_pfmf, | 755 | [0xaf] = handle_pfmf, |
678 | }; | 756 | }; |
@@ -693,32 +771,67 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) | |||
693 | { | 771 | { |
694 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | 772 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; |
695 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | 773 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; |
696 | u64 useraddr; | ||
697 | u32 val = 0; | 774 | u32 val = 0; |
698 | int reg, rc; | 775 | int reg, rc; |
776 | u64 ga; | ||
699 | 777 | ||
700 | vcpu->stat.instruction_lctl++; | 778 | vcpu->stat.instruction_lctl++; |
701 | 779 | ||
702 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 780 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
703 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 781 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
704 | 782 | ||
705 | useraddr = kvm_s390_get_base_disp_rs(vcpu); | 783 | ga = kvm_s390_get_base_disp_rs(vcpu); |
706 | 784 | ||
707 | if (useraddr & 3) | 785 | if (ga & 3) |
708 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 786 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
709 | 787 | ||
710 | VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, | 788 | VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); |
711 | useraddr); | 789 | trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); |
712 | trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr); | ||
713 | 790 | ||
714 | reg = reg1; | 791 | reg = reg1; |
715 | do { | 792 | do { |
716 | rc = get_guest(vcpu, val, (u32 __user *) useraddr); | 793 | rc = read_guest(vcpu, ga, &val, sizeof(val)); |
717 | if (rc) | 794 | if (rc) |
718 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 795 | return kvm_s390_inject_prog_cond(vcpu, rc); |
719 | vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; | 796 | vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; |
720 | vcpu->arch.sie_block->gcr[reg] |= val; | 797 | vcpu->arch.sie_block->gcr[reg] |= val; |
721 | useraddr += 4; | 798 | ga += 4; |
799 | if (reg == reg3) | ||
800 | break; | ||
801 | reg = (reg + 1) % 16; | ||
802 | } while (1); | ||
803 | |||
804 | return 0; | ||
805 | } | ||
806 | |||
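LCTL and the new STCTL handler walk the control registers from reg1 through reg3 inclusive, wrapping modulo 16, so a pair like r1=14, r3=1 covers cr14, cr15, cr0, cr1. The loop shape in isolation:

#include <stdio.h>

int main(void)
{
	int reg1 = 14, reg3 = 1, reg = reg1;

	do {
		printf("cr%d ", reg);	/* load or store one register here */
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	printf("\n");			/* prints: cr14 cr15 cr0 cr1 */
	return 0;
}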
807 | int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) | ||
808 | { | ||
809 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | ||
810 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | ||
811 | u64 ga; | ||
812 | u32 val; | ||
813 | int reg, rc; | ||
814 | |||
815 | vcpu->stat.instruction_stctl++; | ||
816 | |||
817 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | ||
818 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | ||
819 | |||
820 | ga = kvm_s390_get_base_disp_rs(vcpu); | ||
821 | |||
822 | if (ga & 3) | ||
823 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | ||
824 | |||
825 | VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); | ||
826 | trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); | ||
827 | |||
828 | reg = reg1; | ||
829 | do { | ||
830 | val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful; | ||
831 | rc = write_guest(vcpu, ga, &val, sizeof(val)); | ||
832 | if (rc) | ||
833 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
834 | ga += 4; | ||
722 | if (reg == reg3) | 835 | if (reg == reg3) |
723 | break; | 836 | break; |
724 | reg = (reg + 1) % 16; | 837 | reg = (reg + 1) % 16; |
@@ -731,7 +844,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
731 | { | 844 | { |
732 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | 845 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; |
733 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | 846 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; |
734 | u64 useraddr; | 847 | u64 ga, val; |
735 | int reg, rc; | 848 | int reg, rc; |
736 | 849 | ||
737 | vcpu->stat.instruction_lctlg++; | 850 | vcpu->stat.instruction_lctlg++; |
@@ -739,23 +852,58 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
739 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 852 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
740 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 853 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
741 | 854 | ||
742 | useraddr = kvm_s390_get_base_disp_rsy(vcpu); | 855 | ga = kvm_s390_get_base_disp_rsy(vcpu); |
743 | 856 | ||
744 | if (useraddr & 7) | 857 | if (ga & 7) |
745 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 858 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
746 | 859 | ||
747 | reg = reg1; | 860 | reg = reg1; |
748 | 861 | ||
749 | VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, | 862 | VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); |
750 | useraddr); | 863 | trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); |
751 | trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr); | ||
752 | 864 | ||
753 | do { | 865 | do { |
754 | rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg], | 866 | rc = read_guest(vcpu, ga, &val, sizeof(val)); |
755 | (u64 __user *) useraddr); | ||
756 | if (rc) | 867 | if (rc) |
757 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 868 | return kvm_s390_inject_prog_cond(vcpu, rc); |
758 | useraddr += 8; | 869 | vcpu->arch.sie_block->gcr[reg] = val; |
870 | ga += 8; | ||
871 | if (reg == reg3) | ||
872 | break; | ||
873 | reg = (reg + 1) % 16; | ||
874 | } while (1); | ||
875 | |||
876 | return 0; | ||
877 | } | ||
878 | |||
879 | static int handle_stctg(struct kvm_vcpu *vcpu) | ||
880 | { | ||
881 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | ||
882 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | ||
883 | u64 ga, val; | ||
884 | int reg, rc; | ||
885 | |||
886 | vcpu->stat.instruction_stctg++; | ||
887 | |||
888 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | ||
889 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | ||
890 | |||
891 | ga = kvm_s390_get_base_disp_rsy(vcpu); | ||
892 | |||
893 | if (ga & 7) | ||
894 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | ||
895 | |||
896 | reg = reg1; | ||
897 | |||
898 | VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); | ||
899 | trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); | ||
900 | |||
901 | do { | ||
902 | val = vcpu->arch.sie_block->gcr[reg]; | ||
903 | rc = write_guest(vcpu, ga, &val, sizeof(val)); | ||
904 | if (rc) | ||
905 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
906 | ga += 8; | ||
759 | if (reg == reg3) | 907 | if (reg == reg3) |
760 | break; | 908 | break; |
761 | reg = (reg + 1) % 16; | 909 | reg = (reg + 1) % 16; |
@@ -766,6 +914,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
766 | 914 | ||
767 | static const intercept_handler_t eb_handlers[256] = { | 915 | static const intercept_handler_t eb_handlers[256] = { |
768 | [0x2f] = handle_lctlg, | 916 | [0x2f] = handle_lctlg, |
917 | [0x25] = handle_stctg, | ||
769 | }; | 918 | }; |
770 | 919 | ||
771 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) | 920 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) |
@@ -781,8 +930,9 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) | |||
781 | static int handle_tprot(struct kvm_vcpu *vcpu) | 930 | static int handle_tprot(struct kvm_vcpu *vcpu) |
782 | { | 931 | { |
783 | u64 address1, address2; | 932 | u64 address1, address2; |
784 | struct vm_area_struct *vma; | 933 | unsigned long hva, gpa; |
785 | unsigned long user_address; | 934 | int ret = 0, cc = 0; |
935 | bool writable; | ||
786 | 936 | ||
787 | vcpu->stat.instruction_tprot++; | 937 | vcpu->stat.instruction_tprot++; |
788 | 938 | ||
@@ -793,32 +943,41 @@ static int handle_tprot(struct kvm_vcpu *vcpu) | |||
793 | 943 | ||
794 | /* we only handle the Linux memory detection case: | 944 | /* we only handle the Linux memory detection case: |
795 | * access key == 0 | 945 | * access key == 0 |
796 | * guest DAT == off | ||
797 | * everything else goes to userspace. */ | 946 | * everything else goes to userspace. */ |
798 | if (address2 & 0xf0) | 947 | if (address2 & 0xf0) |
799 | return -EOPNOTSUPP; | 948 | return -EOPNOTSUPP; |
800 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) | 949 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) |
801 | return -EOPNOTSUPP; | 950 | ipte_lock(vcpu); |
802 | 951 | ret = guest_translate_address(vcpu, address1, &gpa, 1); | |
803 | down_read(¤t->mm->mmap_sem); | 952 | if (ret == PGM_PROTECTION) { |
804 | user_address = __gmap_translate(address1, vcpu->arch.gmap); | 953 | /* Write protected? Try again with read-only... */ |
805 | if (IS_ERR_VALUE(user_address)) | 954 | cc = 1; |
806 | goto out_inject; | 955 | ret = guest_translate_address(vcpu, address1, &gpa, 0); |
807 | vma = find_vma(current->mm, user_address); | 956 | } |
808 | if (!vma) | 957 | if (ret) { |
809 | goto out_inject; | 958 | if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { |
810 | vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); | 959 | ret = kvm_s390_inject_program_int(vcpu, ret); |
811 | if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ)) | 960 | } else if (ret > 0) { |
812 | vcpu->arch.sie_block->gpsw.mask |= (1ul << 44); | 961 | /* Translation not available */ |
813 | if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ)) | 962 | kvm_s390_set_psw_cc(vcpu, 3); |
814 | vcpu->arch.sie_block->gpsw.mask |= (2ul << 44); | 963 | ret = 0; |
815 | 964 | } | |
816 | up_read(¤t->mm->mmap_sem); | 965 | goto out_unlock; |
817 | return 0; | 966 | } |
818 | 967 | ||
819 | out_inject: | 968 | hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable); |
820 | up_read(¤t->mm->mmap_sem); | 969 | if (kvm_is_error_hva(hva)) { |
821 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 970 | ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
971 | } else { | ||
972 | if (!writable) | ||
973 | cc = 1; /* Write not permitted ==> read-only */ | ||
974 | kvm_s390_set_psw_cc(vcpu, cc); | ||
975 | /* Note: CC2 only occurs for storage keys (not supported yet) */ | ||
976 | } | ||
977 | out_unlock: | ||
978 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) | ||
979 | ipte_unlock(vcpu); | ||
980 | return ret; | ||
822 | } | 981 | } |
823 | 982 | ||
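The reworked TPROT handler maps translation outcomes onto the instruction's condition codes: cc0 when fetch and store are permitted, cc1 when only fetch is, cc3 when the translation is unavailable; cc2 is reserved for key-controlled protection, which the patch does not implement yet. A compact decision sketch (names hypothetical):

#include <stdio.h>

enum tprot_cc { CC_RW = 0, CC_RO = 1, CC_KEYS = 2, CC_UNAVAIL = 3 };

/* Derive TPROT's condition code from a page's accessibility. */
static enum tprot_cc tprot_cc(int mapped, int writable)
{
	if (!mapped)
		return CC_UNAVAIL;	/* translation not available */
	return writable ? CC_RW : CC_RO;
	/* CC_KEYS would require storage-key support (deferred). */
}

int main(void)
{
	printf("%d %d %d\n", tprot_cc(1, 1), tprot_cc(1, 0), tprot_cc(0, 0));
	return 0;
}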
824 | int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) | 983 | int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 26caeb530a78..43079a48cc98 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -54,33 +54,23 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, | |||
54 | 54 | ||
55 | static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) | 55 | static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) |
56 | { | 56 | { |
57 | struct kvm_s390_local_interrupt *li; | 57 | struct kvm_s390_interrupt s390int = { |
58 | struct kvm_s390_interrupt_info *inti; | 58 | .type = KVM_S390_INT_EMERGENCY, |
59 | .parm = vcpu->vcpu_id, | ||
60 | }; | ||
59 | struct kvm_vcpu *dst_vcpu = NULL; | 61 | struct kvm_vcpu *dst_vcpu = NULL; |
62 | int rc = 0; | ||
60 | 63 | ||
61 | if (cpu_addr < KVM_MAX_VCPUS) | 64 | if (cpu_addr < KVM_MAX_VCPUS) |
62 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 65 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
63 | if (!dst_vcpu) | 66 | if (!dst_vcpu) |
64 | return SIGP_CC_NOT_OPERATIONAL; | 67 | return SIGP_CC_NOT_OPERATIONAL; |
65 | 68 | ||
66 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | 69 | rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); |
67 | if (!inti) | 70 | if (!rc) |
68 | return -ENOMEM; | 71 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); |
69 | |||
70 | inti->type = KVM_S390_INT_EMERGENCY; | ||
71 | inti->emerg.code = vcpu->vcpu_id; | ||
72 | |||
73 | li = &dst_vcpu->arch.local_int; | ||
74 | spin_lock_bh(&li->lock); | ||
75 | list_add_tail(&inti->list, &li->list); | ||
76 | atomic_set(&li->active, 1); | ||
77 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | ||
78 | if (waitqueue_active(li->wq)) | ||
79 | wake_up_interruptible(li->wq); | ||
80 | spin_unlock_bh(&li->lock); | ||
81 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); | ||
82 | 72 | ||
83 | return SIGP_CC_ORDER_CODE_ACCEPTED; | 73 | return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; |
84 | } | 74 | } |
85 | 75 | ||
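Both SIGP emergency and external call now build a kvm_s390_interrupt on the stack and delegate to the common kvm_s390_inject_vcpu() helper, instead of open-coding interrupt-list manipulation, cpuflag updates and waitqueue wakeups. The shape of the refactoring, reduced to a toy sketch:

#include <stdio.h>

struct irq_sketch { int type; unsigned int parm; };

/* Stand-in for kvm_s390_inject_vcpu(): locking, queueing and the target
 * wakeup all live behind this one entry point now. */
static int inject(struct irq_sketch *irq)
{
	printf("inject type=%d parm=%u\n", irq->type, irq->parm);
	return 0;
}

int main(void)
{
	struct irq_sketch emerg = { .type = 1, .parm = 5 /* sender cpu id */ };

	return inject(&emerg);
}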
86 | static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, | 76 | static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, |
@@ -116,33 +106,23 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, | |||
116 | 106 | ||
117 | static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) | 107 | static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) |
118 | { | 108 | { |
119 | struct kvm_s390_local_interrupt *li; | 109 | struct kvm_s390_interrupt s390int = { |
120 | struct kvm_s390_interrupt_info *inti; | 110 | .type = KVM_S390_INT_EXTERNAL_CALL, |
111 | .parm = vcpu->vcpu_id, | ||
112 | }; | ||
121 | struct kvm_vcpu *dst_vcpu = NULL; | 113 | struct kvm_vcpu *dst_vcpu = NULL; |
114 | int rc; | ||
122 | 115 | ||
123 | if (cpu_addr < KVM_MAX_VCPUS) | 116 | if (cpu_addr < KVM_MAX_VCPUS) |
124 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 117 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
125 | if (!dst_vcpu) | 118 | if (!dst_vcpu) |
126 | return SIGP_CC_NOT_OPERATIONAL; | 119 | return SIGP_CC_NOT_OPERATIONAL; |
127 | 120 | ||
128 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | 121 | rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); |
129 | if (!inti) | 122 | if (!rc) |
130 | return -ENOMEM; | 123 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); |
131 | 124 | ||
132 | inti->type = KVM_S390_INT_EXTERNAL_CALL; | 125 | return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; |
133 | inti->extcall.code = vcpu->vcpu_id; | ||
134 | |||
135 | li = &dst_vcpu->arch.local_int; | ||
136 | spin_lock_bh(&li->lock); | ||
137 | list_add_tail(&inti->list, &li->list); | ||
138 | atomic_set(&li->active, 1); | ||
139 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | ||
140 | if (waitqueue_active(li->wq)) | ||
141 | wake_up_interruptible(li->wq); | ||
142 | spin_unlock_bh(&li->lock); | ||
143 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); | ||
144 | |||
145 | return SIGP_CC_ORDER_CODE_ACCEPTED; | ||
146 | } | 126 | } |
147 | 127 | ||
148 | static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | 128 | static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) |
@@ -235,7 +215,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
235 | struct kvm_vcpu *dst_vcpu = NULL; | 215 | struct kvm_vcpu *dst_vcpu = NULL; |
236 | struct kvm_s390_interrupt_info *inti; | 216 | struct kvm_s390_interrupt_info *inti; |
237 | int rc; | 217 | int rc; |
238 | u8 tmp; | ||
239 | 218 | ||
240 | if (cpu_addr < KVM_MAX_VCPUS) | 219 | if (cpu_addr < KVM_MAX_VCPUS) |
241 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 220 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
@@ -243,10 +222,13 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
243 | return SIGP_CC_NOT_OPERATIONAL; | 222 | return SIGP_CC_NOT_OPERATIONAL; |
244 | li = &dst_vcpu->arch.local_int; | 223 | li = &dst_vcpu->arch.local_int; |
245 | 224 | ||
246 | /* make sure that the new value is valid memory */ | 225 | /* |
247 | address = address & 0x7fffe000u; | 226 | * Make sure the new value is valid memory. We only need to check the |
248 | if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || | 227 | * first page, since address is 8k aligned and memory pieces are always |
249 | copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { | 228 | * at least 1MB aligned and at least 1MB in size.
229 | */ | ||
230 | address &= 0x7fffe000u; | ||
231 | if (kvm_is_error_gpa(vcpu->kvm, address)) { | ||
250 | *reg &= 0xffffffff00000000UL; | 232 | *reg &= 0xffffffff00000000UL; |
251 | *reg |= SIGP_STATUS_INVALID_PARAMETER; | 233 | *reg |= SIGP_STATUS_INVALID_PARAMETER; |
252 | return SIGP_CC_STATUS_STORED; | 234 | return SIGP_CC_STATUS_STORED; |
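Masking with 0x7fffe000 does two things at once: clearing the low 13 bits forces 8K alignment (a prefix area spans two 4K pages), and clearing the top bit keeps the address within the 31-bit range. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned int address = 0x9abcdfffu;

	address &= 0x7fffe000u;
	/* -> 0x1abcc000: low 13 bits zero (8K-aligned), top bit clear. */
	printf("%#x\n", address);
	return 0;
}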
@@ -456,3 +438,38 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) | |||
456 | kvm_s390_set_psw_cc(vcpu, rc); | 438 | kvm_s390_set_psw_cc(vcpu, rc); |
457 | return 0; | 439 | return 0; |
458 | } | 440 | } |
441 | |||
442 | /* | ||
443 | * Handle SIGP partial execution interception. | ||
444 | * | ||
445 | * This interception will occur on the source cpu when it sends an | ||
446 | * external call to a target cpu and the target cpu has the WAIT bit set in | ||
447 | * its cpuflags. Interception will occur after the interrupt indicator bits at | ||
448 | * the target cpu have been set. All error cases will lead to instruction | ||
449 | * interception, therefore nothing is to be checked or prepared. | ||
450 | */ | ||
451 | int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) | ||
452 | { | ||
453 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; | ||
454 | u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; | ||
455 | struct kvm_vcpu *dest_vcpu; | ||
456 | u8 order_code = kvm_s390_get_base_disp_rs(vcpu); | ||
457 | |||
458 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); | ||
459 | |||
460 | if (order_code == SIGP_EXTERNAL_CALL) { | ||
461 | dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | ||
462 | BUG_ON(dest_vcpu == NULL); | ||
463 | |||
464 | spin_lock_bh(&dest_vcpu->arch.local_int.lock); | ||
465 | if (waitqueue_active(&dest_vcpu->wq)) | ||
466 | wake_up_interruptible(&dest_vcpu->wq); | ||
467 | dest_vcpu->preempted = true; | ||
468 | spin_unlock_bh(&dest_vcpu->arch.local_int.lock); | ||
469 | |||
470 | kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | return -EOPNOTSUPP; | ||
475 | } | ||
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 13f30f58a2df..647e9d6a4818 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h | |||
@@ -68,6 +68,27 @@ TRACE_EVENT(kvm_s390_destroy_vcpu, | |||
68 | ); | 68 | ); |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * Trace point for start and stop of vcpus. | ||
72 | */ | ||
73 | TRACE_EVENT(kvm_s390_vcpu_start_stop, | ||
74 | TP_PROTO(unsigned int id, int state), | ||
75 | TP_ARGS(id, state), | ||
76 | |||
77 | TP_STRUCT__entry( | ||
78 | __field(unsigned int, id) | ||
79 | __field(int, state) | ||
80 | ), | ||
81 | |||
82 | TP_fast_assign( | ||
83 | __entry->id = id; | ||
84 | __entry->state = state; | ||
85 | ), | ||
86 | |||
87 | TP_printk("%s cpu %d", __entry->state ? "starting" : "stopping", | ||
88 | __entry->id) | ||
89 | ); | ||
90 | |||
91 | /* | ||
71 | * Trace points for injection of interrupts, either per machine or | 92 | * Trace points for injection of interrupts, either per machine or |
72 | * per vcpu. | 93 | * per vcpu. |
73 | */ | 94 | */ |
@@ -223,6 +244,28 @@ TRACE_EVENT(kvm_s390_enable_css, | |||
223 | __entry->kvm) | 244 | __entry->kvm) |
224 | ); | 245 | ); |
225 | 246 | ||
247 | /* | ||
248 | * Trace point for enabling and disabling interlocking-and-broadcasting | ||
249 | * suppression. | ||
250 | */ | ||
251 | TRACE_EVENT(kvm_s390_enable_disable_ibs, | ||
252 | TP_PROTO(unsigned int id, int state), | ||
253 | TP_ARGS(id, state), | ||
254 | |||
255 | TP_STRUCT__entry( | ||
256 | __field(unsigned int, id) | ||
257 | __field(int, state) | ||
258 | ), | ||
259 | |||
260 | TP_fast_assign( | ||
261 | __entry->id = id; | ||
262 | __entry->state = state; | ||
263 | ), | ||
264 | |||
265 | TP_printk("%s ibs on cpu %d", | ||
266 | __entry->state ? "enabling" : "disabling", __entry->id) | ||
267 | ); | ||
268 | |||
226 | 269 | ||
227 | #endif /* _TRACE_KVMS390_H */ | 270 | #endif /* _TRACE_KVMS390_H */ |
228 | 271 | ||
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h index e8e7213d4cc5..916834d7a73a 100644 --- a/arch/s390/kvm/trace.h +++ b/arch/s390/kvm/trace.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _TRACE_KVM_H | 2 | #define _TRACE_KVM_H |
3 | 3 | ||
4 | #include <linux/tracepoint.h> | 4 | #include <linux/tracepoint.h> |
5 | #include <asm/sigp.h> | 5 | #include <asm/sie.h> |
6 | #include <asm/debug.h> | 6 | #include <asm/debug.h> |
7 | #include <asm/dis.h> | 7 | #include <asm/dis.h> |
8 | 8 | ||
@@ -30,6 +30,20 @@ | |||
30 | TP_printk("%02d[%016lx-%016lx]: " p_str, __entry->id, \ | 30 | TP_printk("%02d[%016lx-%016lx]: " p_str, __entry->id, \ |
31 | __entry->pswmask, __entry->pswaddr, p_args) | 31 | __entry->pswmask, __entry->pswaddr, p_args) |
32 | 32 | ||
33 | TRACE_EVENT(kvm_s390_skey_related_inst, | ||
34 | TP_PROTO(VCPU_PROTO_COMMON), | ||
35 | TP_ARGS(VCPU_ARGS_COMMON), | ||
36 | |||
37 | TP_STRUCT__entry( | ||
38 | VCPU_FIELD_COMMON | ||
39 | ), | ||
40 | |||
41 | TP_fast_assign( | ||
42 | VCPU_ASSIGN_COMMON | ||
43 | ), | ||
44 | VCPU_TP_PRINTK("%s", "first instruction related to skeys on vcpu") | ||
45 | ); | ||
46 | |||
33 | TRACE_EVENT(kvm_s390_major_guest_pfault, | 47 | TRACE_EVENT(kvm_s390_major_guest_pfault, |
34 | TP_PROTO(VCPU_PROTO_COMMON), | 48 | TP_PROTO(VCPU_PROTO_COMMON), |
35 | TP_ARGS(VCPU_ARGS_COMMON), | 49 | TP_ARGS(VCPU_ARGS_COMMON), |
@@ -111,17 +125,6 @@ TRACE_EVENT(kvm_s390_sie_fault, | |||
111 | VCPU_TP_PRINTK("%s", "fault in sie instruction") | 125 | VCPU_TP_PRINTK("%s", "fault in sie instruction") |
112 | ); | 126 | ); |
113 | 127 | ||
114 | #define sie_intercept_code \ | ||
115 | {0x04, "Instruction"}, \ | ||
116 | {0x08, "Program interruption"}, \ | ||
117 | {0x0C, "Instruction and program interruption"}, \ | ||
118 | {0x10, "External request"}, \ | ||
119 | {0x14, "External interruption"}, \ | ||
120 | {0x18, "I/O request"}, \ | ||
121 | {0x1C, "Wait state"}, \ | ||
122 | {0x20, "Validity"}, \ | ||
123 | {0x28, "Stop request"} | ||
124 | |||
125 | TRACE_EVENT(kvm_s390_sie_exit, | 128 | TRACE_EVENT(kvm_s390_sie_exit, |
126 | TP_PROTO(VCPU_PROTO_COMMON, u8 icptcode), | 129 | TP_PROTO(VCPU_PROTO_COMMON, u8 icptcode), |
127 | TP_ARGS(VCPU_ARGS_COMMON, icptcode), | 130 | TP_ARGS(VCPU_ARGS_COMMON, icptcode), |
@@ -151,7 +154,6 @@ TRACE_EVENT(kvm_s390_intercept_instruction, | |||
151 | TP_STRUCT__entry( | 154 | TP_STRUCT__entry( |
152 | VCPU_FIELD_COMMON | 155 | VCPU_FIELD_COMMON |
153 | __field(__u64, instruction) | 156 | __field(__u64, instruction) |
154 | __field(char, insn[8]) | ||
155 | ), | 157 | ), |
156 | 158 | ||
157 | TP_fast_assign( | 159 | TP_fast_assign( |
@@ -162,10 +164,8 @@ TRACE_EVENT(kvm_s390_intercept_instruction, | |||
162 | 164 | ||
163 | VCPU_TP_PRINTK("intercepted instruction %016llx (%s)", | 165 | VCPU_TP_PRINTK("intercepted instruction %016llx (%s)", |
164 | __entry->instruction, | 166 | __entry->instruction, |
165 | insn_to_mnemonic((unsigned char *) | 167 | __print_symbolic(icpt_insn_decoder(__entry->instruction), |
166 | &__entry->instruction, | 168 | icpt_insn_codes)) |
167 | __entry->insn, sizeof(__entry->insn)) ? | ||
168 | "unknown" : __entry->insn) | ||
169 | ); | 169 | ); |
170 | 170 | ||
171 | /* | 171 | /* |
@@ -213,18 +213,6 @@ TRACE_EVENT(kvm_s390_intercept_validity, | |||
213 | * Trace points for instructions that are of special interest. | 213 | * Trace points for instructions that are of special interest. |
214 | */ | 214 | */ |
215 | 215 | ||
216 | #define sigp_order_codes \ | ||
217 | {SIGP_SENSE, "sense"}, \ | ||
218 | {SIGP_EXTERNAL_CALL, "external call"}, \ | ||
219 | {SIGP_EMERGENCY_SIGNAL, "emergency signal"}, \ | ||
220 | {SIGP_STOP, "stop"}, \ | ||
221 | {SIGP_STOP_AND_STORE_STATUS, "stop and store status"}, \ | ||
222 | {SIGP_SET_ARCHITECTURE, "set architecture"}, \ | ||
223 | {SIGP_SET_PREFIX, "set prefix"}, \ | ||
224 | {SIGP_STORE_STATUS_AT_ADDRESS, "store status at addr"}, \ | ||
225 | {SIGP_SENSE_RUNNING, "sense running"}, \ | ||
226 | {SIGP_RESTART, "restart"} | ||
227 | |||
228 | TRACE_EVENT(kvm_s390_handle_sigp, | 216 | TRACE_EVENT(kvm_s390_handle_sigp, |
229 | TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr, \ | 217 | TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr, \ |
230 | __u32 parameter), | 218 | __u32 parameter), |
@@ -251,12 +239,28 @@ TRACE_EVENT(kvm_s390_handle_sigp, | |||
251 | __entry->cpu_addr, __entry->parameter) | 239 | __entry->cpu_addr, __entry->parameter) |
252 | ); | 240 | ); |
253 | 241 | ||
254 | #define diagnose_codes \ | 242 | TRACE_EVENT(kvm_s390_handle_sigp_pei, |
255 | {0x10, "release pages"}, \ | 243 | TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr), |
256 | {0x44, "time slice end"}, \ | 244 | TP_ARGS(VCPU_ARGS_COMMON, order_code, cpu_addr), |
257 | {0x308, "ipl functions"}, \ | 245 | |
258 | {0x500, "kvm hypercall"}, \ | 246 | TP_STRUCT__entry( |
259 | {0x501, "kvm breakpoint"} | 247 | VCPU_FIELD_COMMON |
248 | __field(__u8, order_code) | ||
249 | __field(__u16, cpu_addr) | ||
250 | ), | ||
251 | |||
252 | TP_fast_assign( | ||
253 | VCPU_ASSIGN_COMMON | ||
254 | __entry->order_code = order_code; | ||
255 | __entry->cpu_addr = cpu_addr; | ||
256 | ), | ||
257 | |||
258 | VCPU_TP_PRINTK("handle sigp pei order %02x (%s), cpu address %04x", | ||
259 | __entry->order_code, | ||
260 | __print_symbolic(__entry->order_code, | ||
261 | sigp_order_codes), | ||
262 | __entry->cpu_addr) | ||
263 | ); | ||
260 | 264 | ||
261 | TRACE_EVENT(kvm_s390_handle_diag, | 265 | TRACE_EVENT(kvm_s390_handle_diag, |
262 | TP_PROTO(VCPU_PROTO_COMMON, __u16 code), | 266 | TP_PROTO(VCPU_PROTO_COMMON, __u16 code), |
@@ -301,6 +305,31 @@ TRACE_EVENT(kvm_s390_handle_lctl, | |||
301 | __entry->reg1, __entry->reg3, __entry->addr) | 305 | __entry->reg1, __entry->reg3, __entry->addr) |
302 | ); | 306 | ); |
303 | 307 | ||
308 | TRACE_EVENT(kvm_s390_handle_stctl, | ||
309 | TP_PROTO(VCPU_PROTO_COMMON, int g, int reg1, int reg3, u64 addr), | ||
310 | TP_ARGS(VCPU_ARGS_COMMON, g, reg1, reg3, addr), | ||
311 | |||
312 | TP_STRUCT__entry( | ||
313 | VCPU_FIELD_COMMON | ||
314 | __field(int, g) | ||
315 | __field(int, reg1) | ||
316 | __field(int, reg3) | ||
317 | __field(u64, addr) | ||
318 | ), | ||
319 | |||
320 | TP_fast_assign( | ||
321 | VCPU_ASSIGN_COMMON | ||
322 | __entry->g = g; | ||
323 | __entry->reg1 = reg1; | ||
324 | __entry->reg3 = reg3; | ||
325 | __entry->addr = addr; | ||
326 | ), | ||
327 | |||
328 | VCPU_TP_PRINTK("%s: storing cr %x-%x to %016llx", | ||
329 | __entry->g ? "stctg" : "stctl", | ||
330 | __entry->reg1, __entry->reg3, __entry->addr) | ||
331 | ); | ||
332 | |||
304 | TRACE_EVENT(kvm_s390_handle_prefix, | 333 | TRACE_EVENT(kvm_s390_handle_prefix, |
305 | TP_PROTO(VCPU_PROTO_COMMON, int set, u32 address), | 334 | TP_PROTO(VCPU_PROTO_COMMON, int set, u32 address), |
306 | TP_ARGS(VCPU_ARGS_COMMON, set, address), | 335 | TP_ARGS(VCPU_ARGS_COMMON, set, address), |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 7881d4eb8b6b..37b8241ec784 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -834,6 +834,7 @@ void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte) | |||
834 | } | 834 | } |
835 | spin_unlock(&gmap_notifier_lock); | 835 | spin_unlock(&gmap_notifier_lock); |
836 | } | 836 | } |
837 | EXPORT_SYMBOL_GPL(gmap_do_ipte_notify); | ||
837 | 838 | ||
838 | static inline int page_table_with_pgste(struct page *page) | 839 | static inline int page_table_with_pgste(struct page *page) |
839 | { | 840 | { |
@@ -866,8 +867,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | |||
866 | atomic_set(&page->_mapcount, 0); | 867 | atomic_set(&page->_mapcount, 0); |
867 | table = (unsigned long *) page_to_phys(page); | 868 | table = (unsigned long *) page_to_phys(page); |
868 | clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); | 869 | clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); |
869 | clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT, | 870 | clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); |
870 | PAGE_SIZE/2); | ||
871 | return table; | 871 | return table; |
872 | } | 872 | } |
873 | 873 | ||
@@ -885,8 +885,8 @@ static inline void page_table_free_pgste(unsigned long *table) | |||
885 | __free_page(page); | 885 | __free_page(page); |
886 | } | 886 | } |
887 | 887 | ||
888 | static inline unsigned long page_table_reset_pte(struct mm_struct *mm, | 888 | static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd, |
889 | pmd_t *pmd, unsigned long addr, unsigned long end) | 889 | unsigned long addr, unsigned long end, bool init_skey) |
890 | { | 890 | { |
891 | pte_t *start_pte, *pte; | 891 | pte_t *start_pte, *pte; |
892 | spinlock_t *ptl; | 892 | spinlock_t *ptl; |
@@ -897,6 +897,22 @@ static inline unsigned long page_table_reset_pte(struct mm_struct *mm, | |||
897 | do { | 897 | do { |
898 | pgste = pgste_get_lock(pte); | 898 | pgste = pgste_get_lock(pte); |
899 | pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; | 899 | pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; |
900 | if (init_skey) { | ||
901 | unsigned long address; | ||
902 | |||
903 | pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT | | ||
904 | PGSTE_GR_BIT | PGSTE_GC_BIT); | ||
905 | |||
906 | /* skip invalid and not writable pages */ | ||
907 | if (pte_val(*pte) & _PAGE_INVALID || | ||
908 | !(pte_val(*pte) & _PAGE_WRITE)) { | ||
909 | pgste_set_unlock(pte, pgste); | ||
910 | continue; | ||
911 | } | ||
912 | |||
913 | address = pte_val(*pte) & PAGE_MASK; | ||
914 | page_set_storage_key(address, PAGE_DEFAULT_KEY, 1); | ||
915 | } | ||
900 | pgste_set_unlock(pte, pgste); | 916 | pgste_set_unlock(pte, pgste); |
901 | } while (pte++, addr += PAGE_SIZE, addr != end); | 917 | } while (pte++, addr += PAGE_SIZE, addr != end); |
902 | pte_unmap_unlock(start_pte, ptl); | 918 | pte_unmap_unlock(start_pte, ptl); |
@@ -904,8 +920,8 @@ static inline unsigned long page_table_reset_pte(struct mm_struct *mm, | |||
904 | return addr; | 920 | return addr; |
905 | } | 921 | } |
906 | 922 | ||
907 | static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, | 923 | static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud, |
908 | pud_t *pud, unsigned long addr, unsigned long end) | 924 | unsigned long addr, unsigned long end, bool init_skey) |
909 | { | 925 | { |
910 | unsigned long next; | 926 | unsigned long next; |
911 | pmd_t *pmd; | 927 | pmd_t *pmd; |
@@ -915,14 +931,14 @@ static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, | |||
915 | next = pmd_addr_end(addr, end); | 931 | next = pmd_addr_end(addr, end); |
916 | if (pmd_none_or_clear_bad(pmd)) | 932 | if (pmd_none_or_clear_bad(pmd)) |
917 | continue; | 933 | continue; |
918 | next = page_table_reset_pte(mm, pmd, addr, next); | 934 | next = page_table_reset_pte(mm, pmd, addr, next, init_skey); |
919 | } while (pmd++, addr = next, addr != end); | 935 | } while (pmd++, addr = next, addr != end); |
920 | 936 | ||
921 | return addr; | 937 | return addr; |
922 | } | 938 | } |
923 | 939 | ||
924 | static inline unsigned long page_table_reset_pud(struct mm_struct *mm, | 940 | static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd, |
925 | pgd_t *pgd, unsigned long addr, unsigned long end) | 941 | unsigned long addr, unsigned long end, bool init_skey) |
926 | { | 942 | { |
927 | unsigned long next; | 943 | unsigned long next; |
928 | pud_t *pud; | 944 | pud_t *pud; |
@@ -932,28 +948,33 @@ static inline unsigned long page_table_reset_pud(struct mm_struct *mm, | |||
932 | next = pud_addr_end(addr, end); | 948 | next = pud_addr_end(addr, end); |
933 | if (pud_none_or_clear_bad(pud)) | 949 | if (pud_none_or_clear_bad(pud)) |
934 | continue; | 950 | continue; |
935 | next = page_table_reset_pmd(mm, pud, addr, next); | 951 | next = page_table_reset_pmd(mm, pud, addr, next, init_skey); |
936 | } while (pud++, addr = next, addr != end); | 952 | } while (pud++, addr = next, addr != end); |
937 | 953 | ||
938 | return addr; | 954 | return addr; |
939 | } | 955 | } |
940 | 956 | ||
941 | void page_table_reset_pgste(struct mm_struct *mm, | 957 | void page_table_reset_pgste(struct mm_struct *mm, unsigned long start, |
942 | unsigned long start, unsigned long end) | 958 | unsigned long end, bool init_skey) |
943 | { | 959 | { |
944 | unsigned long addr, next; | 960 | unsigned long addr, next; |
945 | pgd_t *pgd; | 961 | pgd_t *pgd; |
946 | 962 | ||
963 | down_write(&mm->mmap_sem); | ||
964 | if (init_skey && mm_use_skey(mm)) | ||
965 | goto out_up; | ||
947 | addr = start; | 966 | addr = start; |
948 | down_read(&mm->mmap_sem); | ||
949 | pgd = pgd_offset(mm, addr); | 967 | pgd = pgd_offset(mm, addr); |
950 | do { | 968 | do { |
951 | next = pgd_addr_end(addr, end); | 969 | next = pgd_addr_end(addr, end); |
952 | if (pgd_none_or_clear_bad(pgd)) | 970 | if (pgd_none_or_clear_bad(pgd)) |
953 | continue; | 971 | continue; |
954 | next = page_table_reset_pud(mm, pgd, addr, next); | 972 | next = page_table_reset_pud(mm, pgd, addr, next, init_skey); |
955 | } while (pgd++, addr = next, addr != end); | 973 | } while (pgd++, addr = next, addr != end); |
956 | up_read(&mm->mmap_sem); | 974 | if (init_skey) |
975 | current->mm->context.use_skey = 1; | ||
976 | out_up: | ||
977 | up_write(&mm->mmap_sem); | ||
957 | } | 978 | } |
958 | EXPORT_SYMBOL(page_table_reset_pgste); | 979 | EXPORT_SYMBOL(page_table_reset_pgste); |
959 | 980 | ||
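page_table_reset_pgste() and its helpers follow the kernel's standard multi-level walk: each level computes the end of the current entry's range, clamps it against the overall end, skips empty entries and recurses one level down. The shape of one such level, condensed into a hedged sketch with a made-up 1M entry size:

#include <stdio.h>

#define LEVEL_SHIFT 20UL		/* hypothetical: 1M per entry */
#define LEVEL_SIZE  (1UL << LEVEL_SHIFT)

/* End of this entry's range, or the overall end if that comes first. */
static unsigned long level_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + LEVEL_SIZE) & ~(LEVEL_SIZE - 1);

	return next < end ? next : end;
}

static void walk(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		next = level_addr_end(addr, end);
		/* ...descend one level for [addr, next), skip if empty... */
		printf("range %#lx..%#lx\n", addr, next);
	} while (addr = next, addr != end);
}

int main(void)
{
	walk(0x00080000UL, 0x00280000UL);
	return 0;
}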
@@ -991,7 +1012,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, | |||
991 | /* changing the guest storage key is considered a change of the page */ | 1012 | /* changing the guest storage key is considered a change of the page */ |
992 | if ((pgste_val(new) ^ pgste_val(old)) & | 1013 | if ((pgste_val(new) ^ pgste_val(old)) & |
993 | (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) | 1014 | (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) |
994 | pgste_val(new) |= PGSTE_HC_BIT; | 1015 | pgste_val(new) |= PGSTE_UC_BIT; |
995 | 1016 | ||
996 | pgste_set_unlock(ptep, new); | 1017 | pgste_set_unlock(ptep, new); |
997 | pte_unmap_unlock(*ptep, ptl); | 1018 | pte_unmap_unlock(*ptep, ptl); |
@@ -1013,6 +1034,11 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | |||
1013 | return NULL; | 1034 | return NULL; |
1014 | } | 1035 | } |
1015 | 1036 | ||
1037 | void page_table_reset_pgste(struct mm_struct *mm, unsigned long start, | ||
1038 | unsigned long end, bool init_skey) | ||
1039 | { | ||
1040 | } | ||
1041 | |||
1016 | static inline void page_table_free_pgste(unsigned long *table) | 1042 | static inline void page_table_free_pgste(unsigned long *table) |
1017 | { | 1043 | { |
1018 | } | 1044 | } |
@@ -1359,6 +1385,37 @@ int s390_enable_sie(void) | |||
1359 | } | 1385 | } |
1360 | EXPORT_SYMBOL_GPL(s390_enable_sie); | 1386 | EXPORT_SYMBOL_GPL(s390_enable_sie); |
1361 | 1387 | ||
1388 | /* | ||
1389 | * Enable storage key handling from now on and initialize the storage | ||
1390 | * keys with the default key. | ||
1391 | */ | ||
1392 | void s390_enable_skey(void) | ||
1393 | { | ||
1394 | page_table_reset_pgste(current->mm, 0, TASK_SIZE, true); | ||
1395 | } | ||
1396 | EXPORT_SYMBOL_GPL(s390_enable_skey); | ||
1397 | |||
1398 | /* | ||
1399 | * Test and reset if a guest page is dirty | ||
1400 | */ | ||
1401 | bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) | ||
1402 | { | ||
1403 | pte_t *pte; | ||
1404 | spinlock_t *ptl; | ||
1405 | bool dirty = false; | ||
1406 | |||
1407 | pte = get_locked_pte(gmap->mm, address, &ptl); | ||
1408 | if (unlikely(!pte)) | ||
1409 | return false; | ||
1410 | |||
1411 | if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) | ||
1412 | dirty = true; | ||
1413 | |||
1414 | spin_unlock(ptl); | ||
1415 | return dirty; | ||
1416 | } | ||
1417 | EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty); | ||
1418 | |||
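gmap_test_and_clear_dirty() is the per-page primitive for dirty logging: a caller sweeps the guest address space and records every page whose dirty state tests true in a bitmap before the next copy round. A hedged usage sketch with a toy flag array standing in for the PGSTE state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NPAGES 8

/* Stand-in for gmap_test_and_clear_dirty(): read and reset one flag. */
static bool test_and_clear_dirty(bool *dirty, unsigned long pfn)
{
	bool was = dirty[pfn];

	dirty[pfn] = false;
	return was;
}

int main(void)
{
	bool dirty[NPAGES] = { [1] = true, [5] = true };
	uint8_t bitmap = 0;
	unsigned long pfn;

	for (pfn = 0; pfn < NPAGES; pfn++)
		if (test_and_clear_dirty(dirty, pfn))
			bitmap |= 1u << pfn;	/* report to migration */

	printf("dirty bitmap: %#x\n", bitmap);	/* prints 0x22 */
	return 0;
}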
1362 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 1419 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1363 | int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, | 1420 | int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, |
1364 | pmd_t *pmdp) | 1421 | pmd_t *pmdp) |