33 files changed, 2831 insertions, 501 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index a9380ba54c8e..2014ff12b492 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
@@ -2314,8 +2314,8 @@ struct kvm_create_device { | |||
2314 | 2314 | ||
2315 | 4.80 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR | 2315 | 4.80 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR |
2316 | 2316 | ||
2317 | Capability: KVM_CAP_DEVICE_CTRL | 2317 | Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device |
2318 | Type: device ioctl | 2318 | Type: device ioctl, vm ioctl |
2319 | Parameters: struct kvm_device_attr | 2319 | Parameters: struct kvm_device_attr |
2320 | Returns: 0 on success, -1 on error | 2320 | Returns: 0 on success, -1 on error |
2321 | Errors: | 2321 | Errors: |
@@ -2340,8 +2340,8 @@ struct kvm_device_attr { | |||
2340 | 2340 | ||
2341 | 4.81 KVM_HAS_DEVICE_ATTR | 2341 | 4.81 KVM_HAS_DEVICE_ATTR |
2342 | 2342 | ||
2343 | Capability: KVM_CAP_DEVICE_CTRL | 2343 | Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device |
2344 | Type: device ioctl | 2344 | Type: device ioctl, vm ioctl |
2345 | Parameters: struct kvm_device_attr | 2345 | Parameters: struct kvm_device_attr |
2346 | Returns: 0 on success, -1 on error | 2346 | Returns: 0 on success, -1 on error |
2347 | Errors: | 2347 | Errors: |
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt new file mode 100644 index 000000000000..0d16f96c0eac --- /dev/null +++ b/Documentation/virtual/kvm/devices/vm.txt | |||
@@ -0,0 +1,26 @@ | |||
1 | Generic vm interface | ||
2 | ==================================== | ||
3 | |||
4 | The virtual machine "device" also accepts the ioctls KVM_SET_DEVICE_ATTR, | ||
5 | KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same | ||
6 | struct kvm_device_attr as other devices, but targets VM-wide settings | ||
7 | and controls. | ||
8 | |||
9 | The groups and attributes per virtual machine, if any, are architecture | ||
10 | specific. | ||
11 | |||
12 | 1. GROUP: KVM_S390_VM_MEM_CTRL | ||
13 | Architectures: s390 | ||
14 | |||
15 | 1.1. ATTRIBUTE: KVM_S390_VM_MEM_ENABLE_CMMA | ||
16 | Parameters: none | ||
17 | Returns: -EBUSY if a vcpu is already defined, otherwise 0 | ||
18 | |||
19 | Enables CMMA for the virtual machine. | ||
20 | |||
21 | 1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA | ||
22 | Parameters: none | ||
23 | Returns: 0 | ||
24 | |||
25 | Clear the CMMA status for all guest pages, so any pages the guest marked | ||
26 | as unused are again in use and may not be reclaimed by the host. | ||
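
The vm attributes above are driven through the regular device-attr ioctls,
now accepted on the vm file descriptor. A minimal userspace sketch, assuming
vm_fd comes from a prior KVM_CREATE_VM call (enable_cmma is an illustrative
name, not part of this series):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Enable CMMA for a VM; must run before the first vcpu is
	 * created, otherwise the kernel returns -EBUSY (see 1.1). */
	static int enable_cmma(int vm_fd)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_MEM_CTRL,
			.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
		};

		/* Probe first; only kernels advertising
		 * KVM_CAP_VM_ATTRIBUTES accept attrs on the vm fd. */
		if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
			return -1;
		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
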
diff --git a/Documentation/virtual/kvm/s390-diag.txt b/Documentation/virtual/kvm/s390-diag.txt index f1de4fbade15..48c4921794ed 100644 --- a/Documentation/virtual/kvm/s390-diag.txt +++ b/Documentation/virtual/kvm/s390-diag.txt | |||
@@ -78,3 +78,5 @@ DIAGNOSE function code 'X'501 - KVM breakpoint | |||
78 | 78 | ||
79 | If the function code specifies 0x501, breakpoint functions may be performed. | 79 | If the function code specifies 0x501, breakpoint functions may be performed. |
80 | This function code is handled by userspace. | 80 | This function code is handled by userspace. |
81 | |||
82 | This diagnose function code has no subfunctions and uses no parameters. | ||
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 4e63f1a13600..31ab9f346d7e 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h | |||
@@ -57,6 +57,20 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) | |||
57 | void smp_ctl_set_bit(int cr, int bit); | 57 | void smp_ctl_set_bit(int cr, int bit); |
58 | void smp_ctl_clear_bit(int cr, int bit); | 58 | void smp_ctl_clear_bit(int cr, int bit); |
59 | 59 | ||
60 | union ctlreg0 { | ||
61 | unsigned long val; | ||
62 | struct { | ||
63 | #ifdef CONFIG_64BIT | ||
64 | unsigned long : 32; | ||
65 | #endif | ||
66 | unsigned long : 3; | ||
67 | unsigned long lap : 1; /* Low-address-protection control */ | ||
68 | unsigned long : 4; | ||
69 | unsigned long edat : 1; /* Enhanced-DAT-enablement control */ | ||
70 | unsigned long : 23; | ||
71 | }; | ||
72 | }; | ||
73 | |||
60 | #ifdef CONFIG_SMP | 74 | #ifdef CONFIG_SMP |
61 | # define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) | 75 | # define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) |
62 | # define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) | 76 | # define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) |
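
The new union ctlreg0 lets kernel code test control-register-0 bits by name
instead of open-coded masks. A minimal sketch using __ctl_store() from this
same header (lap_enabled is an illustrative helper, not part of the patch):

	#include <linux/types.h>
	#include <asm/ctl_reg.h>

	/* Report whether low-address protection is enabled on this cpu. */
	static bool lap_enabled(void)
	{
		union ctlreg0 cr0;

		__ctl_store(cr0.val, 0, 0);	/* fetch control register 0 */
		return cr0.lap;
	}
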
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 154b60089be9..0d45f6fe734f 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -39,9 +39,17 @@ struct sca_entry { | |||
39 | __u64 reserved2[2]; | 39 | __u64 reserved2[2]; |
40 | } __attribute__((packed)); | 40 | } __attribute__((packed)); |
41 | 41 | ||
42 | union ipte_control { | ||
43 | unsigned long val; | ||
44 | struct { | ||
45 | unsigned long k : 1; | ||
46 | unsigned long kh : 31; | ||
47 | unsigned long kg : 32; | ||
48 | }; | ||
49 | }; | ||
42 | 50 | ||
43 | struct sca_block { | 51 | struct sca_block { |
44 | __u64 ipte_control; | 52 | union ipte_control ipte_control; |
45 | __u64 reserved[5]; | 53 | __u64 reserved[5]; |
46 | __u64 mcn; | 54 | __u64 mcn; |
47 | __u64 reserved2; | 55 | __u64 reserved2; |
@@ -85,12 +93,26 @@ struct kvm_s390_sie_block { | |||
85 | __u8 reserved40[4]; /* 0x0040 */ | 93 | __u8 reserved40[4]; /* 0x0040 */ |
86 | #define LCTL_CR0 0x8000 | 94 | #define LCTL_CR0 0x8000 |
87 | #define LCTL_CR6 0x0200 | 95 | #define LCTL_CR6 0x0200 |
96 | #define LCTL_CR9 0x0040 | ||
97 | #define LCTL_CR10 0x0020 | ||
98 | #define LCTL_CR11 0x0010 | ||
88 | #define LCTL_CR14 0x0002 | 99 | #define LCTL_CR14 0x0002 |
89 | __u16 lctl; /* 0x0044 */ | 100 | __u16 lctl; /* 0x0044 */ |
90 | __s16 icpua; /* 0x0046 */ | 101 | __s16 icpua; /* 0x0046 */ |
91 | #define ICTL_LPSW 0x00400000 | 102 | #define ICTL_PINT 0x20000000 |
103 | #define ICTL_LPSW 0x00400000 | ||
104 | #define ICTL_STCTL 0x00040000 | ||
105 | #define ICTL_ISKE 0x00004000 | ||
106 | #define ICTL_SSKE 0x00002000 | ||
107 | #define ICTL_RRBE 0x00001000 | ||
92 | __u32 ictl; /* 0x0048 */ | 108 | __u32 ictl; /* 0x0048 */ |
93 | __u32 eca; /* 0x004c */ | 109 | __u32 eca; /* 0x004c */ |
110 | #define ICPT_INST 0x04 | ||
111 | #define ICPT_PROGI 0x08 | ||
112 | #define ICPT_INSTPROGI 0x0C | ||
113 | #define ICPT_OPEREXC 0x2C | ||
114 | #define ICPT_PARTEXEC 0x38 | ||
115 | #define ICPT_IOINST 0x40 | ||
94 | __u8 icptcode; /* 0x0050 */ | 116 | __u8 icptcode; /* 0x0050 */ |
95 | __u8 reserved51; /* 0x0051 */ | 117 | __u8 reserved51; /* 0x0051 */ |
96 | __u16 ihcpu; /* 0x0052 */ | 118 | __u16 ihcpu; /* 0x0052 */ |
@@ -109,9 +131,21 @@ struct kvm_s390_sie_block { | |||
109 | psw_t gpsw; /* 0x0090 */ | 131 | psw_t gpsw; /* 0x0090 */ |
110 | __u64 gg14; /* 0x00a0 */ | 132 | __u64 gg14; /* 0x00a0 */ |
111 | __u64 gg15; /* 0x00a8 */ | 133 | __u64 gg15; /* 0x00a8 */ |
112 | __u8 reservedb0[30]; /* 0x00b0 */ | 134 | __u8 reservedb0[28]; /* 0x00b0 */ |
113 | __u16 iprcc; /* 0x00ce */ | 135 | __u16 pgmilc; /* 0x00cc */ |
114 | __u8 reservedd0[48]; /* 0x00d0 */ | 136 | __u16 iprcc; /* 0x00ce */ |
137 | __u32 dxc; /* 0x00d0 */ | ||
138 | __u16 mcn; /* 0x00d4 */ | ||
139 | __u8 perc; /* 0x00d6 */ | ||
140 | __u8 peratmid; /* 0x00d7 */ | ||
141 | __u64 peraddr; /* 0x00d8 */ | ||
142 | __u8 eai; /* 0x00e0 */ | ||
143 | __u8 peraid; /* 0x00e1 */ | ||
144 | __u8 oai; /* 0x00e2 */ | ||
145 | __u8 armid; /* 0x00e3 */ | ||
146 | __u8 reservede4[4]; /* 0x00e4 */ | ||
147 | __u64 tecmc; /* 0x00e8 */ | ||
148 | __u8 reservedf0[16]; /* 0x00f0 */ | ||
115 | __u64 gcr[16]; /* 0x0100 */ | 149 | __u64 gcr[16]; /* 0x0100 */ |
116 | __u64 gbea; /* 0x0180 */ | 150 | __u64 gbea; /* 0x0180 */ |
117 | __u8 reserved188[24]; /* 0x0188 */ | 151 | __u8 reserved188[24]; /* 0x0188 */ |
@@ -146,6 +180,8 @@ struct kvm_vcpu_stat { | |||
146 | u32 exit_instruction; | 180 | u32 exit_instruction; |
147 | u32 instruction_lctl; | 181 | u32 instruction_lctl; |
148 | u32 instruction_lctlg; | 182 | u32 instruction_lctlg; |
183 | u32 instruction_stctl; | ||
184 | u32 instruction_stctg; | ||
149 | u32 exit_program_interruption; | 185 | u32 exit_program_interruption; |
150 | u32 exit_instr_and_program; | 186 | u32 exit_instr_and_program; |
151 | u32 deliver_external_call; | 187 | u32 deliver_external_call; |
@@ -164,6 +200,7 @@ struct kvm_vcpu_stat { | |||
164 | u32 instruction_stpx; | 200 | u32 instruction_stpx; |
165 | u32 instruction_stap; | 201 | u32 instruction_stap; |
166 | u32 instruction_storage_key; | 202 | u32 instruction_storage_key; |
203 | u32 instruction_ipte_interlock; | ||
167 | u32 instruction_stsch; | 204 | u32 instruction_stsch; |
168 | u32 instruction_chsc; | 205 | u32 instruction_chsc; |
169 | u32 instruction_stsi; | 206 | u32 instruction_stsi; |
@@ -183,13 +220,58 @@ struct kvm_vcpu_stat { | |||
183 | u32 diagnose_9c; | 220 | u32 diagnose_9c; |
184 | }; | 221 | }; |
185 | 222 | ||
186 | #define PGM_OPERATION 0x01 | 223 | #define PGM_OPERATION 0x01 |
187 | #define PGM_PRIVILEGED_OP 0x02 | 224 | #define PGM_PRIVILEGED_OP 0x02 |
188 | #define PGM_EXECUTE 0x03 | 225 | #define PGM_EXECUTE 0x03 |
189 | #define PGM_PROTECTION 0x04 | 226 | #define PGM_PROTECTION 0x04 |
190 | #define PGM_ADDRESSING 0x05 | 227 | #define PGM_ADDRESSING 0x05 |
191 | #define PGM_SPECIFICATION 0x06 | 228 | #define PGM_SPECIFICATION 0x06 |
192 | #define PGM_DATA 0x07 | 229 | #define PGM_DATA 0x07 |
230 | #define PGM_FIXED_POINT_OVERFLOW 0x08 | ||
231 | #define PGM_FIXED_POINT_DIVIDE 0x09 | ||
232 | #define PGM_DECIMAL_OVERFLOW 0x0a | ||
233 | #define PGM_DECIMAL_DIVIDE 0x0b | ||
234 | #define PGM_HFP_EXPONENT_OVERFLOW 0x0c | ||
235 | #define PGM_HFP_EXPONENT_UNDERFLOW 0x0d | ||
236 | #define PGM_HFP_SIGNIFICANCE 0x0e | ||
237 | #define PGM_HFP_DIVIDE 0x0f | ||
238 | #define PGM_SEGMENT_TRANSLATION 0x10 | ||
239 | #define PGM_PAGE_TRANSLATION 0x11 | ||
240 | #define PGM_TRANSLATION_SPEC 0x12 | ||
241 | #define PGM_SPECIAL_OPERATION 0x13 | ||
242 | #define PGM_OPERAND 0x15 | ||
243 | #define PGM_TRACE_TABEL 0x16 | ||
244 | #define PGM_SPACE_SWITCH 0x1c | ||
245 | #define PGM_HFP_SQUARE_ROOT 0x1d | ||
246 | #define PGM_PC_TRANSLATION_SPEC 0x1f | ||
247 | #define PGM_AFX_TRANSLATION 0x20 | ||
248 | #define PGM_ASX_TRANSLATION 0x21 | ||
249 | #define PGM_LX_TRANSLATION 0x22 | ||
250 | #define PGM_EX_TRANSLATION 0x23 | ||
251 | #define PGM_PRIMARY_AUTHORITY 0x24 | ||
252 | #define PGM_SECONDARY_AUTHORITY 0x25 | ||
253 | #define PGM_LFX_TRANSLATION 0x26 | ||
254 | #define PGM_LSX_TRANSLATION 0x27 | ||
255 | #define PGM_ALET_SPECIFICATION 0x28 | ||
256 | #define PGM_ALEN_TRANSLATION 0x29 | ||
257 | #define PGM_ALE_SEQUENCE 0x2a | ||
258 | #define PGM_ASTE_VALIDITY 0x2b | ||
259 | #define PGM_ASTE_SEQUENCE 0x2c | ||
260 | #define PGM_EXTENDED_AUTHORITY 0x2d | ||
261 | #define PGM_LSTE_SEQUENCE 0x2e | ||
262 | #define PGM_ASTE_INSTANCE 0x2f | ||
263 | #define PGM_STACK_FULL 0x30 | ||
264 | #define PGM_STACK_EMPTY 0x31 | ||
265 | #define PGM_STACK_SPECIFICATION 0x32 | ||
266 | #define PGM_STACK_TYPE 0x33 | ||
267 | #define PGM_STACK_OPERATION 0x34 | ||
268 | #define PGM_ASCE_TYPE 0x38 | ||
269 | #define PGM_REGION_FIRST_TRANS 0x39 | ||
270 | #define PGM_REGION_SECOND_TRANS 0x3a | ||
271 | #define PGM_REGION_THIRD_TRANS 0x3b | ||
272 | #define PGM_MONITOR 0x40 | ||
273 | #define PGM_PER 0x80 | ||
274 | #define PGM_CRYPTO_OPERATION 0x119 | ||
193 | 275 | ||
194 | struct kvm_s390_interrupt_info { | 276 | struct kvm_s390_interrupt_info { |
195 | struct list_head list; | 277 | struct list_head list; |
@@ -229,6 +311,45 @@ struct kvm_s390_float_interrupt { | |||
229 | unsigned int irq_count; | 311 | unsigned int irq_count; |
230 | }; | 312 | }; |
231 | 313 | ||
314 | struct kvm_hw_wp_info_arch { | ||
315 | unsigned long addr; | ||
316 | unsigned long phys_addr; | ||
317 | int len; | ||
318 | char *old_data; | ||
319 | }; | ||
320 | |||
321 | struct kvm_hw_bp_info_arch { | ||
322 | unsigned long addr; | ||
323 | int len; | ||
324 | }; | ||
325 | |||
326 | /* | ||
327 | * Only the upper 16 bits of kvm_guest_debug->control are arch specific. | ||
328 | * Further KVM_GUESTDBG flags which can be used from userspace can be found in | ||
329 | * arch/s390/include/uapi/asm/kvm.h | ||
330 | */ | ||
331 | #define KVM_GUESTDBG_EXIT_PENDING 0x10000000 | ||
332 | |||
333 | #define guestdbg_enabled(vcpu) \ | ||
334 | (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) | ||
335 | #define guestdbg_sstep_enabled(vcpu) \ | ||
336 | (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
337 | #define guestdbg_hw_bp_enabled(vcpu) \ | ||
338 | (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) | ||
339 | #define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \ | ||
340 | (vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING)) | ||
341 | |||
342 | struct kvm_guestdbg_info_arch { | ||
343 | unsigned long cr0; | ||
344 | unsigned long cr9; | ||
345 | unsigned long cr10; | ||
346 | unsigned long cr11; | ||
347 | struct kvm_hw_bp_info_arch *hw_bp_info; | ||
348 | struct kvm_hw_wp_info_arch *hw_wp_info; | ||
349 | int nr_hw_bp; | ||
350 | int nr_hw_wp; | ||
351 | unsigned long last_bp; | ||
352 | }; | ||
232 | 353 | ||
233 | struct kvm_vcpu_arch { | 354 | struct kvm_vcpu_arch { |
234 | struct kvm_s390_sie_block *sie_block; | 355 | struct kvm_s390_sie_block *sie_block; |
@@ -238,11 +359,13 @@ struct kvm_vcpu_arch { | |||
238 | struct kvm_s390_local_interrupt local_int; | 359 | struct kvm_s390_local_interrupt local_int; |
239 | struct hrtimer ckc_timer; | 360 | struct hrtimer ckc_timer; |
240 | struct tasklet_struct tasklet; | 361 | struct tasklet_struct tasklet; |
362 | struct kvm_s390_pgm_info pgm; | ||
241 | union { | 363 | union { |
242 | struct cpuid cpu_id; | 364 | struct cpuid cpu_id; |
243 | u64 stidp_data; | 365 | u64 stidp_data; |
244 | }; | 366 | }; |
245 | struct gmap *gmap; | 367 | struct gmap *gmap; |
368 | struct kvm_guestdbg_info_arch guestdbg; | ||
246 | #define KVM_S390_PFAULT_TOKEN_INVALID (-1UL) | 369 | #define KVM_S390_PFAULT_TOKEN_INVALID (-1UL) |
247 | unsigned long pfault_token; | 370 | unsigned long pfault_token; |
248 | unsigned long pfault_select; | 371 | unsigned long pfault_select; |
@@ -285,7 +408,9 @@ struct kvm_arch{ | |||
285 | struct gmap *gmap; | 408 | struct gmap *gmap; |
286 | int css_support; | 409 | int css_support; |
287 | int use_irqchip; | 410 | int use_irqchip; |
411 | int use_cmma; | ||
288 | struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; | 412 | struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; |
413 | wait_queue_head_t ipte_wq; | ||
289 | }; | 414 | }; |
290 | 415 | ||
291 | #define KVM_HVA_ERR_BAD (-1UL) | 416 | #define KVM_HVA_ERR_BAD (-1UL) |
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index bbf8141408cd..e88cb8c54130 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h | |||
@@ -56,13 +56,14 @@ struct _lowcore { | |||
56 | __u16 pgm_code; /* 0x008e */ | 56 | __u16 pgm_code; /* 0x008e */ |
57 | __u32 trans_exc_code; /* 0x0090 */ | 57 | __u32 trans_exc_code; /* 0x0090 */ |
58 | __u16 mon_class_num; /* 0x0094 */ | 58 | __u16 mon_class_num; /* 0x0094 */ |
59 | __u16 per_perc_atmid; /* 0x0096 */ | 59 | __u8 per_code; /* 0x0096 */ |
60 | __u8 per_atmid; /* 0x0097 */ | ||
60 | __u32 per_address; /* 0x0098 */ | 61 | __u32 per_address; /* 0x0098 */ |
61 | __u32 monitor_code; /* 0x009c */ | 62 | __u32 monitor_code; /* 0x009c */ |
62 | __u8 exc_access_id; /* 0x00a0 */ | 63 | __u8 exc_access_id; /* 0x00a0 */ |
63 | __u8 per_access_id; /* 0x00a1 */ | 64 | __u8 per_access_id; /* 0x00a1 */ |
64 | __u8 op_access_id; /* 0x00a2 */ | 65 | __u8 op_access_id; /* 0x00a2 */ |
65 | __u8 ar_access_id; /* 0x00a3 */ | 66 | __u8 ar_mode_id; /* 0x00a3 */ |
66 | __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */ | 67 | __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */ |
67 | __u16 subchannel_id; /* 0x00b8 */ | 68 | __u16 subchannel_id; /* 0x00b8 */ |
68 | __u16 subchannel_nr; /* 0x00ba */ | 69 | __u16 subchannel_nr; /* 0x00ba */ |
@@ -196,12 +197,13 @@ struct _lowcore { | |||
196 | __u16 pgm_code; /* 0x008e */ | 197 | __u16 pgm_code; /* 0x008e */ |
197 | __u32 data_exc_code; /* 0x0090 */ | 198 | __u32 data_exc_code; /* 0x0090 */ |
198 | __u16 mon_class_num; /* 0x0094 */ | 199 | __u16 mon_class_num; /* 0x0094 */ |
199 | __u16 per_perc_atmid; /* 0x0096 */ | 200 | __u8 per_code; /* 0x0096 */ |
201 | __u8 per_atmid; /* 0x0097 */ | ||
200 | __u64 per_address; /* 0x0098 */ | 202 | __u64 per_address; /* 0x0098 */ |
201 | __u8 exc_access_id; /* 0x00a0 */ | 203 | __u8 exc_access_id; /* 0x00a0 */ |
202 | __u8 per_access_id; /* 0x00a1 */ | 204 | __u8 per_access_id; /* 0x00a1 */ |
203 | __u8 op_access_id; /* 0x00a2 */ | 205 | __u8 op_access_id; /* 0x00a2 */ |
204 | __u8 ar_access_id; /* 0x00a3 */ | 206 | __u8 ar_mode_id; /* 0x00a3 */ |
205 | __u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */ | 207 | __u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */ |
206 | __u64 trans_exc_code; /* 0x00a8 */ | 208 | __u64 trans_exc_code; /* 0x00a8 */ |
207 | __u64 monitor_code; /* 0x00b0 */ | 209 | __u64 monitor_code; /* 0x00b0 */ |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index f77695a82f64..a5e656260a70 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
@@ -16,6 +16,8 @@ typedef struct { | |||
16 | unsigned long vdso_base; | 16 | unsigned long vdso_base; |
17 | /* The mmu context has extended page tables. */ | 17 | /* The mmu context has extended page tables. */ |
18 | unsigned int has_pgste:1; | 18 | unsigned int has_pgste:1; |
19 | /* The mmu context uses storage keys. */ | ||
20 | unsigned int use_skey:1; | ||
19 | } mm_context_t; | 21 | } mm_context_t; |
20 | 22 | ||
21 | #define INIT_MM_CONTEXT(name) \ | 23 | #define INIT_MM_CONTEXT(name) \ |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 71be346d0e3c..d42fb1b728d8 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -23,6 +23,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
23 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; | 23 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; |
24 | #endif | 24 | #endif |
25 | mm->context.has_pgste = 0; | 25 | mm->context.has_pgste = 0; |
26 | mm->context.use_skey = 0; | ||
26 | mm->context.asce_limit = STACK_TOP_MAX; | 27 | mm->context.asce_limit = STACK_TOP_MAX; |
27 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); | 28 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); |
28 | return 0; | 29 | return 0; |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 884017cbfa9f..9e18a61d3df3 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -22,7 +22,8 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long); | |||
22 | void page_table_free(struct mm_struct *, unsigned long *); | 22 | void page_table_free(struct mm_struct *, unsigned long *); |
23 | void page_table_free_rcu(struct mmu_gather *, unsigned long *); | 23 | void page_table_free_rcu(struct mmu_gather *, unsigned long *); |
24 | 24 | ||
25 | void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long); | 25 | void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long, |
26 | bool init_skey); | ||
26 | int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, | 27 | int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, |
27 | unsigned long key, bool nq); | 28 | unsigned long key, bool nq); |
28 | 29 | ||
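
The new init_skey parameter, together with s390_enable_skey() and the
ICTL_ISKE/ICTL_SSKE/ICTL_RRBE bits added to the sie block above, allows
storage keys to be enabled lazily, on the guest's first key instruction.
A hypothetical sketch of that flow (handler name and exact logic are
illustrative, not this series' code):

	/* First intercepted key instruction: switch the mm over to real
	 * storage keys, then stop intercepting ISKE/SSKE/RRBE so the
	 * guest executes them natively from now on. */
	static int handle_skey_intercept(struct kvm_vcpu *vcpu)
	{
		if (!mm_use_skey(current->mm))
			s390_enable_skey();	/* sets mm->context.use_skey */
		vcpu->arch.sie_block->ictl &=
			~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
		return 0;
	}
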
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 12f75313e086..fcba5e03839f 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -309,7 +309,8 @@ extern unsigned long MODULES_END; | |||
309 | #define PGSTE_HC_BIT 0x00200000UL | 309 | #define PGSTE_HC_BIT 0x00200000UL |
310 | #define PGSTE_GR_BIT 0x00040000UL | 310 | #define PGSTE_GR_BIT 0x00040000UL |
311 | #define PGSTE_GC_BIT 0x00020000UL | 311 | #define PGSTE_GC_BIT 0x00020000UL |
312 | #define PGSTE_IN_BIT 0x00008000UL /* IPTE notify bit */ | 312 | #define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */ |
313 | #define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */ | ||
313 | 314 | ||
314 | #else /* CONFIG_64BIT */ | 315 | #else /* CONFIG_64BIT */ |
315 | 316 | ||
@@ -391,7 +392,8 @@ extern unsigned long MODULES_END; | |||
391 | #define PGSTE_HC_BIT 0x0020000000000000UL | 392 | #define PGSTE_HC_BIT 0x0020000000000000UL |
392 | #define PGSTE_GR_BIT 0x0004000000000000UL | 393 | #define PGSTE_GR_BIT 0x0004000000000000UL |
393 | #define PGSTE_GC_BIT 0x0002000000000000UL | 394 | #define PGSTE_GC_BIT 0x0002000000000000UL |
394 | #define PGSTE_IN_BIT 0x0000800000000000UL /* IPTE notify bit */ | 395 | #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ |
396 | #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ | ||
395 | 397 | ||
396 | #endif /* CONFIG_64BIT */ | 398 | #endif /* CONFIG_64BIT */ |
397 | 399 | ||
@@ -466,6 +468,16 @@ static inline int mm_has_pgste(struct mm_struct *mm) | |||
466 | #endif | 468 | #endif |
467 | return 0; | 469 | return 0; |
468 | } | 470 | } |
471 | |||
472 | static inline int mm_use_skey(struct mm_struct *mm) | ||
473 | { | ||
474 | #ifdef CONFIG_PGSTE | ||
475 | if (mm->context.use_skey) | ||
476 | return 1; | ||
477 | #endif | ||
478 | return 0; | ||
479 | } | ||
480 | |||
469 | /* | 481 | /* |
470 | * pgd/pmd/pte query functions | 482 | * pgd/pmd/pte query functions |
471 | */ | 483 | */ |
@@ -699,26 +711,17 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste) | |||
699 | #endif | 711 | #endif |
700 | } | 712 | } |
701 | 713 | ||
702 | static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) | 714 | static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste, |
715 | struct mm_struct *mm) | ||
703 | { | 716 | { |
704 | #ifdef CONFIG_PGSTE | 717 | #ifdef CONFIG_PGSTE |
705 | unsigned long address, bits, skey; | 718 | unsigned long address, bits, skey; |
706 | 719 | ||
707 | if (pte_val(*ptep) & _PAGE_INVALID) | 720 | if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID) |
708 | return pgste; | 721 | return pgste; |
709 | address = pte_val(*ptep) & PAGE_MASK; | 722 | address = pte_val(*ptep) & PAGE_MASK; |
710 | skey = (unsigned long) page_get_storage_key(address); | 723 | skey = (unsigned long) page_get_storage_key(address); |
711 | bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); | 724 | bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); |
712 | if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) { | ||
713 | /* Transfer dirty + referenced bit to host bits in pgste */ | ||
714 | pgste_val(pgste) |= bits << 52; | ||
715 | page_set_storage_key(address, skey ^ bits, 0); | ||
716 | } else if (!(pgste_val(pgste) & PGSTE_HR_BIT) && | ||
717 | (bits & _PAGE_REFERENCED)) { | ||
718 | /* Transfer referenced bit to host bit in pgste */ | ||
719 | pgste_val(pgste) |= PGSTE_HR_BIT; | ||
720 | page_reset_referenced(address); | ||
721 | } | ||
722 | /* Transfer page changed & referenced bit to guest bits in pgste */ | 725 | /* Transfer page changed & referenced bit to guest bits in pgste */ |
723 | pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */ | 726 | pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */ |
724 | /* Copy page access key and fetch protection bit to pgste */ | 727 | /* Copy page access key and fetch protection bit to pgste */ |
@@ -729,25 +732,14 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) | |||
729 | 732 | ||
730 | } | 733 | } |
731 | 734 | ||
732 | static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) | 735 | static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry, |
733 | { | 736 | struct mm_struct *mm) |
734 | #ifdef CONFIG_PGSTE | ||
735 | if (pte_val(*ptep) & _PAGE_INVALID) | ||
736 | return pgste; | ||
737 | /* Get referenced bit from storage key */ | ||
738 | if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK)) | ||
739 | pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT; | ||
740 | #endif | ||
741 | return pgste; | ||
742 | } | ||
743 | |||
744 | static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) | ||
745 | { | 737 | { |
746 | #ifdef CONFIG_PGSTE | 738 | #ifdef CONFIG_PGSTE |
747 | unsigned long address; | 739 | unsigned long address; |
748 | unsigned long nkey; | 740 | unsigned long nkey; |
749 | 741 | ||
750 | if (pte_val(entry) & _PAGE_INVALID) | 742 | if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID) |
751 | return; | 743 | return; |
752 | VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); | 744 | VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); |
753 | address = pte_val(entry) & PAGE_MASK; | 745 | address = pte_val(entry) & PAGE_MASK; |
@@ -757,23 +749,30 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) | |||
757 | * key C/R to 0. | 749 | * key C/R to 0. |
758 | */ | 750 | */ |
759 | nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56; | 751 | nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56; |
752 | nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48; | ||
760 | page_set_storage_key(address, nkey, 0); | 753 | page_set_storage_key(address, nkey, 0); |
761 | #endif | 754 | #endif |
762 | } | 755 | } |
763 | 756 | ||
764 | static inline void pgste_set_pte(pte_t *ptep, pte_t entry) | 757 | static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) |
765 | { | 758 | { |
766 | if (!MACHINE_HAS_ESOP && | 759 | if ((pte_val(entry) & _PAGE_PRESENT) && |
767 | (pte_val(entry) & _PAGE_PRESENT) && | 760 | (pte_val(entry) & _PAGE_WRITE) && |
768 | (pte_val(entry) & _PAGE_WRITE)) { | 761 | !(pte_val(entry) & _PAGE_INVALID)) { |
769 | /* | 762 | if (!MACHINE_HAS_ESOP) { |
770 | * Without enhanced suppression-on-protection force | 763 | /* |
771 | * the dirty bit on for all writable ptes. | 764 | * Without enhanced suppression-on-protection force |
772 | */ | 765 | * the dirty bit on for all writable ptes. |
773 | pte_val(entry) |= _PAGE_DIRTY; | 766 | */ |
774 | pte_val(entry) &= ~_PAGE_PROTECT; | 767 | pte_val(entry) |= _PAGE_DIRTY; |
768 | pte_val(entry) &= ~_PAGE_PROTECT; | ||
769 | } | ||
770 | if (!(pte_val(entry) & _PAGE_PROTECT)) | ||
771 | /* This pte allows write access, set user-dirty */ | ||
772 | pgste_val(pgste) |= PGSTE_UC_BIT; | ||
775 | } | 773 | } |
776 | *ptep = entry; | 774 | *ptep = entry; |
775 | return pgste; | ||
777 | } | 776 | } |
778 | 777 | ||
779 | /** | 778 | /** |
@@ -839,6 +838,8 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *); | |||
839 | unsigned long gmap_fault(unsigned long address, struct gmap *); | 838 | unsigned long gmap_fault(unsigned long address, struct gmap *); |
840 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *); | 839 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *); |
841 | void __gmap_zap(unsigned long address, struct gmap *); | 840 | void __gmap_zap(unsigned long address, struct gmap *); |
841 | bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *); | ||
842 | |||
842 | 843 | ||
843 | void gmap_register_ipte_notifier(struct gmap_notifier *); | 844 | void gmap_register_ipte_notifier(struct gmap_notifier *); |
844 | void gmap_unregister_ipte_notifier(struct gmap_notifier *); | 845 | void gmap_unregister_ipte_notifier(struct gmap_notifier *); |
@@ -870,8 +871,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
870 | if (mm_has_pgste(mm)) { | 871 | if (mm_has_pgste(mm)) { |
871 | pgste = pgste_get_lock(ptep); | 872 | pgste = pgste_get_lock(ptep); |
872 | pgste_val(pgste) &= ~_PGSTE_GPS_ZERO; | 873 | pgste_val(pgste) &= ~_PGSTE_GPS_ZERO; |
873 | pgste_set_key(ptep, pgste, entry); | 874 | pgste_set_key(ptep, pgste, entry, mm); |
874 | pgste_set_pte(ptep, entry); | 875 | pgste = pgste_set_pte(ptep, pgste, entry); |
875 | pgste_set_unlock(ptep, pgste); | 876 | pgste_set_unlock(ptep, pgste); |
876 | } else { | 877 | } else { |
877 | if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1) | 878 | if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1) |
@@ -1017,45 +1018,6 @@ static inline pte_t pte_mkhuge(pte_t pte) | |||
1017 | } | 1018 | } |
1018 | #endif | 1019 | #endif |
1019 | 1020 | ||
1020 | /* | ||
1021 | * Get (and clear) the user dirty bit for a pte. | ||
1022 | */ | ||
1023 | static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, | ||
1024 | pte_t *ptep) | ||
1025 | { | ||
1026 | pgste_t pgste; | ||
1027 | int dirty = 0; | ||
1028 | |||
1029 | if (mm_has_pgste(mm)) { | ||
1030 | pgste = pgste_get_lock(ptep); | ||
1031 | pgste = pgste_update_all(ptep, pgste); | ||
1032 | dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT); | ||
1033 | pgste_val(pgste) &= ~PGSTE_HC_BIT; | ||
1034 | pgste_set_unlock(ptep, pgste); | ||
1035 | return dirty; | ||
1036 | } | ||
1037 | return dirty; | ||
1038 | } | ||
1039 | |||
1040 | /* | ||
1041 | * Get (and clear) the user referenced bit for a pte. | ||
1042 | */ | ||
1043 | static inline int ptep_test_and_clear_user_young(struct mm_struct *mm, | ||
1044 | pte_t *ptep) | ||
1045 | { | ||
1046 | pgste_t pgste; | ||
1047 | int young = 0; | ||
1048 | |||
1049 | if (mm_has_pgste(mm)) { | ||
1050 | pgste = pgste_get_lock(ptep); | ||
1051 | pgste = pgste_update_young(ptep, pgste); | ||
1052 | young = !!(pgste_val(pgste) & PGSTE_HR_BIT); | ||
1053 | pgste_val(pgste) &= ~PGSTE_HR_BIT; | ||
1054 | pgste_set_unlock(ptep, pgste); | ||
1055 | } | ||
1056 | return young; | ||
1057 | } | ||
1058 | |||
1059 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) | 1021 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) |
1060 | { | 1022 | { |
1061 | unsigned long pto = (unsigned long) ptep; | 1023 | unsigned long pto = (unsigned long) ptep; |
@@ -1118,6 +1080,36 @@ static inline void ptep_flush_lazy(struct mm_struct *mm, | |||
1118 | atomic_sub(0x10000, &mm->context.attach_count); | 1080 | atomic_sub(0x10000, &mm->context.attach_count); |
1119 | } | 1081 | } |
1120 | 1082 | ||
1083 | /* | ||
1084 | * Get (and clear) the user dirty bit for a pte. | ||
1085 | */ | ||
1086 | static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, | ||
1087 | unsigned long addr, | ||
1088 | pte_t *ptep) | ||
1089 | { | ||
1090 | pgste_t pgste; | ||
1091 | pte_t pte; | ||
1092 | int dirty; | ||
1093 | |||
1094 | if (!mm_has_pgste(mm)) | ||
1095 | return 0; | ||
1096 | pgste = pgste_get_lock(ptep); | ||
1097 | dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT); | ||
1098 | pgste_val(pgste) &= ~PGSTE_UC_BIT; | ||
1099 | pte = *ptep; | ||
1100 | if (dirty && (pte_val(pte) & _PAGE_PRESENT)) { | ||
1101 | pgste = pgste_ipte_notify(mm, ptep, pgste); | ||
1102 | __ptep_ipte(addr, ptep); | ||
1103 | if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) | ||
1104 | pte_val(pte) |= _PAGE_PROTECT; | ||
1105 | else | ||
1106 | pte_val(pte) |= _PAGE_INVALID; | ||
1107 | *ptep = pte; | ||
1108 | } | ||
1109 | pgste_set_unlock(ptep, pgste); | ||
1110 | return dirty; | ||
1111 | } | ||
1112 | |||
1121 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 1113 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
1122 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | 1114 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, |
1123 | unsigned long addr, pte_t *ptep) | 1115 | unsigned long addr, pte_t *ptep) |
@@ -1137,7 +1129,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | |||
1137 | pte = pte_mkold(pte); | 1129 | pte = pte_mkold(pte); |
1138 | 1130 | ||
1139 | if (mm_has_pgste(vma->vm_mm)) { | 1131 | if (mm_has_pgste(vma->vm_mm)) { |
1140 | pgste_set_pte(ptep, pte); | 1132 | pgste = pgste_set_pte(ptep, pgste, pte); |
1141 | pgste_set_unlock(ptep, pgste); | 1133 | pgste_set_unlock(ptep, pgste); |
1142 | } else | 1134 | } else |
1143 | *ptep = pte; | 1135 | *ptep = pte; |
@@ -1182,7 +1174,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | |||
1182 | pte_val(*ptep) = _PAGE_INVALID; | 1174 | pte_val(*ptep) = _PAGE_INVALID; |
1183 | 1175 | ||
1184 | if (mm_has_pgste(mm)) { | 1176 | if (mm_has_pgste(mm)) { |
1185 | pgste = pgste_update_all(&pte, pgste); | 1177 | pgste = pgste_update_all(&pte, pgste, mm); |
1186 | pgste_set_unlock(ptep, pgste); | 1178 | pgste_set_unlock(ptep, pgste); |
1187 | } | 1179 | } |
1188 | return pte; | 1180 | return pte; |
@@ -1205,7 +1197,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, | |||
1205 | ptep_flush_lazy(mm, address, ptep); | 1197 | ptep_flush_lazy(mm, address, ptep); |
1206 | 1198 | ||
1207 | if (mm_has_pgste(mm)) { | 1199 | if (mm_has_pgste(mm)) { |
1208 | pgste = pgste_update_all(&pte, pgste); | 1200 | pgste = pgste_update_all(&pte, pgste, mm); |
1209 | pgste_set(ptep, pgste); | 1201 | pgste_set(ptep, pgste); |
1210 | } | 1202 | } |
1211 | return pte; | 1203 | return pte; |
@@ -1219,8 +1211,8 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, | |||
1219 | 1211 | ||
1220 | if (mm_has_pgste(mm)) { | 1212 | if (mm_has_pgste(mm)) { |
1221 | pgste = pgste_get(ptep); | 1213 | pgste = pgste_get(ptep); |
1222 | pgste_set_key(ptep, pgste, pte); | 1214 | pgste_set_key(ptep, pgste, pte, mm); |
1223 | pgste_set_pte(ptep, pte); | 1215 | pgste = pgste_set_pte(ptep, pgste, pte); |
1224 | pgste_set_unlock(ptep, pgste); | 1216 | pgste_set_unlock(ptep, pgste); |
1225 | } else | 1217 | } else |
1226 | *ptep = pte; | 1218 | *ptep = pte; |
@@ -1246,7 +1238,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, | |||
1246 | if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == | 1238 | if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == |
1247 | _PGSTE_GPS_USAGE_UNUSED) | 1239 | _PGSTE_GPS_USAGE_UNUSED) |
1248 | pte_val(pte) |= _PAGE_UNUSED; | 1240 | pte_val(pte) |= _PAGE_UNUSED; |
1249 | pgste = pgste_update_all(&pte, pgste); | 1241 | pgste = pgste_update_all(&pte, pgste, vma->vm_mm); |
1250 | pgste_set_unlock(ptep, pgste); | 1242 | pgste_set_unlock(ptep, pgste); |
1251 | } | 1243 | } |
1252 | return pte; | 1244 | return pte; |
@@ -1278,7 +1270,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | |||
1278 | pte_val(*ptep) = _PAGE_INVALID; | 1270 | pte_val(*ptep) = _PAGE_INVALID; |
1279 | 1271 | ||
1280 | if (!full && mm_has_pgste(mm)) { | 1272 | if (!full && mm_has_pgste(mm)) { |
1281 | pgste = pgste_update_all(&pte, pgste); | 1273 | pgste = pgste_update_all(&pte, pgste, mm); |
1282 | pgste_set_unlock(ptep, pgste); | 1274 | pgste_set_unlock(ptep, pgste); |
1283 | } | 1275 | } |
1284 | return pte; | 1276 | return pte; |
@@ -1301,7 +1293,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, | |||
1301 | pte = pte_wrprotect(pte); | 1293 | pte = pte_wrprotect(pte); |
1302 | 1294 | ||
1303 | if (mm_has_pgste(mm)) { | 1295 | if (mm_has_pgste(mm)) { |
1304 | pgste_set_pte(ptep, pte); | 1296 | pgste = pgste_set_pte(ptep, pgste, pte); |
1305 | pgste_set_unlock(ptep, pgste); | 1297 | pgste_set_unlock(ptep, pgste); |
1306 | } else | 1298 | } else |
1307 | *ptep = pte; | 1299 | *ptep = pte; |
@@ -1326,7 +1318,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, | |||
1326 | ptep_flush_direct(vma->vm_mm, address, ptep); | 1318 | ptep_flush_direct(vma->vm_mm, address, ptep); |
1327 | 1319 | ||
1328 | if (mm_has_pgste(vma->vm_mm)) { | 1320 | if (mm_has_pgste(vma->vm_mm)) { |
1329 | pgste_set_pte(ptep, entry); | 1321 | pgste = pgste_set_pte(ptep, pgste, entry); |
1330 | pgste_set_unlock(ptep, pgste); | 1322 | pgste_set_unlock(ptep, pgste); |
1331 | } else | 1323 | } else |
1332 | *ptep = entry; | 1324 | *ptep = entry; |
@@ -1734,6 +1726,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | |||
1734 | extern int vmem_add_mapping(unsigned long start, unsigned long size); | 1726 | extern int vmem_add_mapping(unsigned long start, unsigned long size); |
1735 | extern int vmem_remove_mapping(unsigned long start, unsigned long size); | 1727 | extern int vmem_remove_mapping(unsigned long start, unsigned long size); |
1736 | extern int s390_enable_sie(void); | 1728 | extern int s390_enable_sie(void); |
1729 | extern void s390_enable_skey(void); | ||
1737 | 1730 | ||
1738 | /* | 1731 | /* |
1739 | * No page table caches to initialise | 1732 | * No page table caches to initialise |
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index f4783c0b7b43..6e7708f3d866 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h | |||
@@ -16,6 +16,50 @@ | |||
16 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \ | 16 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \ |
17 | PSW_MASK_PSTATE | PSW_ASC_PRIMARY) | 17 | PSW_MASK_PSTATE | PSW_ASC_PRIMARY) |
18 | 18 | ||
19 | struct psw_bits { | ||
20 | unsigned long long : 1; | ||
21 | unsigned long long r : 1; /* PER-Mask */ | ||
22 | unsigned long long : 3; | ||
23 | unsigned long long t : 1; /* DAT Mode */ | ||
24 | unsigned long long i : 1; /* Input/Output Mask */ | ||
25 | unsigned long long e : 1; /* External Mask */ | ||
26 | unsigned long long key : 4; /* PSW Key */ | ||
27 | unsigned long long : 1; | ||
28 | unsigned long long m : 1; /* Machine-Check Mask */ | ||
29 | unsigned long long w : 1; /* Wait State */ | ||
30 | unsigned long long p : 1; /* Problem State */ | ||
31 | unsigned long long as : 2; /* Address Space Control */ | ||
32 | unsigned long long cc : 2; /* Condition Code */ | ||
33 | unsigned long long pm : 4; /* Program Mask */ | ||
34 | unsigned long long ri : 1; /* Runtime Instrumentation */ | ||
35 | unsigned long long : 6; | ||
36 | unsigned long long eaba : 2; /* Addressing Mode */ | ||
37 | #ifdef CONFIG_64BIT | ||
38 | unsigned long long : 31; | ||
39 | unsigned long long ia : 64;/* Instruction Address */ | ||
40 | #else | ||
41 | unsigned long long ia : 31;/* Instruction Address */ | ||
42 | #endif | ||
43 | }; | ||
44 | |||
45 | enum { | ||
46 | PSW_AMODE_24BIT = 0, | ||
47 | PSW_AMODE_31BIT = 1, | ||
48 | PSW_AMODE_64BIT = 3 | ||
49 | }; | ||
50 | |||
51 | enum { | ||
52 | PSW_AS_PRIMARY = 0, | ||
53 | PSW_AS_ACCREG = 1, | ||
54 | PSW_AS_SECONDARY = 2, | ||
55 | PSW_AS_HOME = 3 | ||
56 | }; | ||
57 | |||
58 | #define psw_bits(__psw) (*({ \ | ||
59 | typecheck(psw_t, __psw); \ | ||
60 | &(*(struct psw_bits *)(&(__psw))); \ | ||
61 | })) | ||
62 | |||
19 | /* | 63 | /* |
20 | * The pt_regs struct defines the way the registers are stored on | 64 | * The pt_regs struct defines the way the registers are stored on |
21 | * the stack during a system call. | 65 | * the stack during a system call. |
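
psw_bits() provides type-checked bit-field access to a psw_t and replaces
open-coded PSW_MASK_* arithmetic in the new guest-access code. A minimal
sketch (the helper name and its vcpu-based use are illustrative):

	/* True if the guest runs with DAT on in the home address space. */
	static bool guest_in_home_space(struct kvm_vcpu *vcpu)
	{
		psw_t *gpsw = &vcpu->arch.sie_block->gpsw;

		return psw_bits(*gpsw).t &&
		       psw_bits(*gpsw).as == PSW_AS_HOME;
	}
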
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 2f5e9932b4de..943d43451116 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h | |||
@@ -28,7 +28,11 @@ struct sclp_ipl_info { | |||
28 | 28 | ||
29 | struct sclp_cpu_entry { | 29 | struct sclp_cpu_entry { |
30 | u8 address; | 30 | u8 address; |
31 | u8 reserved0[13]; | 31 | u8 reserved0[2]; |
32 | u8 : 3; | ||
33 | u8 siif : 1; | ||
34 | u8 : 4; | ||
35 | u8 reserved2[10]; | ||
32 | u8 type; | 36 | u8 type; |
33 | u8 reserved1; | 37 | u8 reserved1; |
34 | } __attribute__((packed)); | 38 | } __attribute__((packed)); |
@@ -61,5 +65,6 @@ int sclp_pci_deconfigure(u32 fid); | |||
61 | int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); | 65 | int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); |
62 | unsigned long sclp_get_hsa_size(void); | 66 | unsigned long sclp_get_hsa_size(void); |
63 | void sclp_early_detect(void); | 67 | void sclp_early_detect(void); |
68 | int sclp_has_siif(void); | ||
64 | 69 | ||
65 | #endif /* _ASM_S390_SCLP_H */ | 70 | #endif /* _ASM_S390_SCLP_H */ |
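
sclp_has_siif() exposes the new siif bit of the cpu entry so KVM can enable
interpretive execution of the SIGP interface only where the facility exists.
A sketch of the intended consumer (the wrapper is illustrative; ECA bit 1 is
what ipte_lock_held() in gaccess.c below tests):

	static void vcpu_setup_siif(struct kvm_vcpu *vcpu)
	{
		if (sclp_has_siif())
			vcpu->arch.sie_block->eca |= 1;
	}
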
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index c003c6a73b1e..0fc26430a1e5 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | 16 | ||
17 | #define __KVM_S390 | 17 | #define __KVM_S390 |
18 | #define __KVM_HAVE_GUEST_DEBUG | ||
18 | 19 | ||
19 | /* Device control API: s390-specific devices */ | 20 | /* Device control API: s390-specific devices */ |
20 | #define KVM_DEV_FLIC_GET_ALL_IRQS 1 | 21 | #define KVM_DEV_FLIC_GET_ALL_IRQS 1 |
@@ -54,6 +55,13 @@ struct kvm_s390_io_adapter_req { | |||
54 | __u64 addr; | 55 | __u64 addr; |
55 | }; | 56 | }; |
56 | 57 | ||
58 | /* kvm attr_group on vm fd */ | ||
59 | #define KVM_S390_VM_MEM_CTRL 0 | ||
60 | |||
61 | /* kvm attributes for mem_ctrl */ | ||
62 | #define KVM_S390_VM_MEM_ENABLE_CMMA 0 | ||
63 | #define KVM_S390_VM_MEM_CLR_CMMA 1 | ||
64 | |||
57 | /* for KVM_GET_REGS and KVM_SET_REGS */ | 65 | /* for KVM_GET_REGS and KVM_SET_REGS */ |
58 | struct kvm_regs { | 66 | struct kvm_regs { |
59 | /* general purpose regs for s390 */ | 67 | /* general purpose regs for s390 */ |
@@ -72,11 +80,31 @@ struct kvm_fpu { | |||
72 | __u64 fprs[16]; | 80 | __u64 fprs[16]; |
73 | }; | 81 | }; |
74 | 82 | ||
83 | #define KVM_GUESTDBG_USE_HW_BP 0x00010000 | ||
84 | |||
85 | #define KVM_HW_BP 1 | ||
86 | #define KVM_HW_WP_WRITE 2 | ||
87 | #define KVM_SINGLESTEP 4 | ||
88 | |||
75 | struct kvm_debug_exit_arch { | 89 | struct kvm_debug_exit_arch { |
90 | __u64 addr; | ||
91 | __u8 type; | ||
92 | __u8 pad[7]; /* Should be set to 0 */ | ||
93 | }; | ||
94 | |||
95 | struct kvm_hw_breakpoint { | ||
96 | __u64 addr; | ||
97 | __u64 phys_addr; | ||
98 | __u64 len; | ||
99 | __u8 type; | ||
100 | __u8 pad[7]; /* Should be set to 0 */ | ||
76 | }; | 101 | }; |
77 | 102 | ||
78 | /* for KVM_SET_GUEST_DEBUG */ | 103 | /* for KVM_SET_GUEST_DEBUG */ |
79 | struct kvm_guest_debug_arch { | 104 | struct kvm_guest_debug_arch { |
105 | __u32 nr_hw_bp; | ||
106 | __u32 pad; /* Should be set to 0 */ | ||
107 | struct kvm_hw_breakpoint __user *hw_bp; | ||
80 | }; | 108 | }; |
81 | 109 | ||
82 | #define KVM_SYNC_PREFIX (1UL << 0) | 110 | #define KVM_SYNC_PREFIX (1UL << 0) |
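
From userspace, the new structures arm hardware breakpoints through the
existing KVM_SET_GUEST_DEBUG ioctl. A minimal sketch, assuming vcpu_fd comes
from KVM_CREATE_VCPU (set_hw_breakpoint is an illustrative name):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_hw_breakpoint(int vcpu_fd, __u64 addr)
	{
		struct kvm_hw_breakpoint bp;
		struct kvm_guest_debug dbg;

		memset(&bp, 0, sizeof(bp));
		bp.addr = addr;
		bp.type = KVM_HW_BP;
		bp.len  = 1;

		memset(&dbg, 0, sizeof(dbg));
		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
		dbg.arch.nr_hw_bp = 1;
		dbg.arch.hw_bp = &bp;

		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	}
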
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index cc10cdd4d6a2..859a7ed36c4b 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -89,16 +89,22 @@ int main(void) | |||
89 | DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); | 89 | DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); |
90 | DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); | 90 | DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); |
91 | DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); | 91 | DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); |
92 | DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid)); | 92 | DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num)); |
93 | DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code)); | ||
94 | DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid)); | ||
93 | DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); | 95 | DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); |
94 | DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id)); | 96 | DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id)); |
95 | DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); | 97 | DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); |
98 | DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id)); | ||
99 | DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id)); | ||
100 | DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code)); | ||
96 | DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); | 101 | DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); |
97 | DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); | 102 | DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); |
98 | DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm)); | 103 | DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm)); |
99 | DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); | 104 | DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); |
100 | DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); | 105 | DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); |
101 | DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); | 106 | DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); |
107 | DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code)); | ||
102 | DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); | 108 | DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); |
103 | DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); | 109 | DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); |
104 | DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); | 110 | DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); |
@@ -156,6 +162,8 @@ int main(void) | |||
156 | #ifdef CONFIG_32BIT | 162 | #ifdef CONFIG_32BIT |
157 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); | 163 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); |
158 | #else /* CONFIG_32BIT */ | 164 | #else /* CONFIG_32BIT */ |
165 | DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); | ||
166 | DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); | ||
159 | DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); | 167 | DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); |
160 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); | 168 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); |
161 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); | 169 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 1662038516c0..e66f046b9c43 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -391,8 +391,8 @@ ENTRY(pgm_check_handler) | |||
391 | jz pgm_kprobe | 391 | jz pgm_kprobe |
392 | oi __TI_flags+3(%r12),_TIF_PER_TRAP | 392 | oi __TI_flags+3(%r12),_TIF_PER_TRAP |
393 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS | 393 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS |
394 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE | 394 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE |
395 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID | 395 | mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID |
396 | 0: REENABLE_IRQS | 396 | 0: REENABLE_IRQS |
397 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 397 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
398 | l %r1,BASED(.Ljump_table) | 398 | l %r1,BASED(.Ljump_table) |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 5963e43618bb..3c34753de6ad 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -423,8 +423,8 @@ ENTRY(pgm_check_handler) | |||
423 | jz pgm_kprobe | 423 | jz pgm_kprobe |
424 | oi __TI_flags+7(%r12),_TIF_PER_TRAP | 424 | oi __TI_flags+7(%r12),_TIF_PER_TRAP |
425 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS | 425 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS |
426 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE | 426 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE |
427 | mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID | 427 | mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID |
428 | 0: REENABLE_IRQS | 428 | 0: REENABLE_IRQS |
429 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 429 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
430 | larl %r1,pgm_check_table | 430 | larl %r1,pgm_check_table |
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile index d3adb37e93a4..b3b553469650 100644 --- a/arch/s390/kvm/Makefile +++ b/arch/s390/kvm/Makefile | |||
@@ -11,5 +11,7 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch | |||
11 | 11 | ||
12 | ccflags-y := -Ivirt/kvm -Iarch/s390/kvm | 12 | ccflags-y := -Ivirt/kvm -Iarch/s390/kvm |
13 | 13 | ||
14 | kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o | 14 | kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o |
15 | kvm-objs += diag.o gaccess.o guestdbg.o | ||
16 | |||
15 | obj-$(CONFIG_KVM) += kvm.o | 17 | obj-$(CONFIG_KVM) += kvm.o |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 08dfc839a6cf..5521ace8b60d 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -64,12 +64,12 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) | |||
64 | int rc; | 64 | int rc; |
65 | u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; | 65 | u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; |
66 | u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); | 66 | u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); |
67 | unsigned long hva_token = KVM_HVA_ERR_BAD; | ||
68 | 67 | ||
69 | if (vcpu->run->s.regs.gprs[rx] & 7) | 68 | if (vcpu->run->s.regs.gprs[rx] & 7) |
70 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 69 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
71 | if (copy_from_guest(vcpu, &parm, vcpu->run->s.regs.gprs[rx], sizeof(parm))) | 70 | rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); |
72 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 71 | if (rc) |
72 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
73 | if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) | 73 | if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) |
74 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 74 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
75 | 75 | ||
@@ -89,8 +89,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) | |||
89 | parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL) | 89 | parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL) |
90 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 90 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
91 | 91 | ||
92 | hva_token = gfn_to_hva(vcpu->kvm, gpa_to_gfn(parm.token_addr)); | 92 | if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr)) |
93 | if (kvm_is_error_hva(hva_token)) | ||
94 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 93 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
95 | 94 | ||
96 | vcpu->arch.pfault_token = parm.token_addr; | 95 | vcpu->arch.pfault_token = parm.token_addr; |
@@ -167,17 +166,11 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) | |||
167 | 166 | ||
168 | VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); | 167 | VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); |
169 | switch (subcode) { | 168 | switch (subcode) { |
170 | case 0: | ||
171 | case 1: | ||
172 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); | ||
173 | return -EOPNOTSUPP; | ||
174 | case 3: | 169 | case 3: |
175 | vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; | 170 | vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; |
176 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); | ||
177 | break; | 171 | break; |
178 | case 4: | 172 | case 4: |
179 | vcpu->run->s390_reset_flags = 0; | 173 | vcpu->run->s390_reset_flags = 0; |
180 | page_table_reset_pgste(current->mm, 0, TASK_SIZE); | ||
181 | break; | 174 | break; |
182 | default: | 175 | default: |
183 | return -EOPNOTSUPP; | 176 | return -EOPNOTSUPP; |
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c new file mode 100644 index 000000000000..691fdb776c90 --- /dev/null +++ b/arch/s390/kvm/gaccess.c | |||
@@ -0,0 +1,645 @@ | |||
1 | /* | ||
2 | * guest access functions | ||
3 | * | ||
4 | * Copyright IBM Corp. 2014 | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/vmalloc.h> | ||
9 | #include <linux/err.h> | ||
10 | #include <asm/pgtable.h> | ||
11 | #include "kvm-s390.h" | ||
12 | #include "gaccess.h" | ||
13 | |||
14 | union asce { | ||
15 | unsigned long val; | ||
16 | struct { | ||
17 | unsigned long origin : 52; /* Region- or Segment-Table Origin */ | ||
18 | unsigned long : 2; | ||
19 | unsigned long g : 1; /* Subspace Group Control */ | ||
20 | unsigned long p : 1; /* Private Space Control */ | ||
21 | unsigned long s : 1; /* Storage-Alteration-Event Control */ | ||
22 | unsigned long x : 1; /* Space-Switch-Event Control */ | ||
23 | unsigned long r : 1; /* Real-Space Control */ | ||
24 | unsigned long : 1; | ||
25 | unsigned long dt : 2; /* Designation-Type Control */ | ||
26 | unsigned long tl : 2; /* Region- or Segment-Table Length */ | ||
27 | }; | ||
28 | }; | ||
29 | |||
30 | enum { | ||
31 | ASCE_TYPE_SEGMENT = 0, | ||
32 | ASCE_TYPE_REGION3 = 1, | ||
33 | ASCE_TYPE_REGION2 = 2, | ||
34 | ASCE_TYPE_REGION1 = 3 | ||
35 | }; | ||
36 | |||
37 | union region1_table_entry { | ||
38 | unsigned long val; | ||
39 | struct { | ||
40 | unsigned long rto: 52;/* Region-Table Origin */ | ||
41 | unsigned long : 2; | ||
42 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
43 | unsigned long : 1; | ||
44 | unsigned long tf : 2; /* Region-Second-Table Offset */ | ||
45 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
46 | unsigned long : 1; | ||
47 | unsigned long tt : 2; /* Table-Type Bits */ | ||
48 | unsigned long tl : 2; /* Region-Second-Table Length */ | ||
49 | }; | ||
50 | }; | ||
51 | |||
52 | union region2_table_entry { | ||
53 | unsigned long val; | ||
54 | struct { | ||
55 | unsigned long rto: 52;/* Region-Table Origin */ | ||
56 | unsigned long : 2; | ||
57 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
58 | unsigned long : 1; | ||
59 | unsigned long tf : 2; /* Region-Third-Table Offset */ | ||
60 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
61 | unsigned long : 1; | ||
62 | unsigned long tt : 2; /* Table-Type Bits */ | ||
63 | unsigned long tl : 2; /* Region-Third-Table Length */ | ||
64 | }; | ||
65 | }; | ||
66 | |||
67 | struct region3_table_entry_fc0 { | ||
68 | unsigned long sto: 52;/* Segment-Table Origin */ | ||
69 | unsigned long : 1; | ||
70 | unsigned long fc : 1; /* Format-Control */ | ||
71 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
72 | unsigned long : 1; | ||
73 | unsigned long tf : 2; /* Segment-Table Offset */ | ||
74 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
75 | unsigned long cr : 1; /* Common-Region Bit */ | ||
76 | unsigned long tt : 2; /* Table-Type Bits */ | ||
77 | unsigned long tl : 2; /* Segment-Table Length */ | ||
78 | }; | ||
79 | |||
80 | struct region3_table_entry_fc1 { | ||
81 | unsigned long rfaa : 33; /* Region-Frame Absolute Address */ | ||
82 | unsigned long : 14; | ||
83 | unsigned long av : 1; /* ACCF-Validity Control */ | ||
84 | unsigned long acc: 4; /* Access-Control Bits */ | ||
85 | unsigned long f : 1; /* Fetch-Protection Bit */ | ||
86 | unsigned long fc : 1; /* Format-Control */ | ||
87 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
88 | unsigned long co : 1; /* Change-Recording Override */ | ||
89 | unsigned long : 2; | ||
90 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
91 | unsigned long cr : 1; /* Common-Region Bit */ | ||
92 | unsigned long tt : 2; /* Table-Type Bits */ | ||
93 | unsigned long : 2; | ||
94 | }; | ||
95 | |||
96 | union region3_table_entry { | ||
97 | unsigned long val; | ||
98 | struct region3_table_entry_fc0 fc0; | ||
99 | struct region3_table_entry_fc1 fc1; | ||
100 | struct { | ||
101 | unsigned long : 53; | ||
102 | unsigned long fc : 1; /* Format-Control */ | ||
103 | unsigned long : 4; | ||
104 | unsigned long i : 1; /* Region-Invalid Bit */ | ||
105 | unsigned long cr : 1; /* Common-Region Bit */ | ||
106 | unsigned long tt : 2; /* Table-Type Bits */ | ||
107 | unsigned long : 2; | ||
108 | }; | ||
109 | }; | ||
110 | |||
111 | struct segment_entry_fc0 { | ||
112 | unsigned long pto: 53;/* Page-Table Origin */ | ||
113 | unsigned long fc : 1; /* Format-Control */ | ||
114 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
115 | unsigned long : 3; | ||
116 | unsigned long i : 1; /* Segment-Invalid Bit */ | ||
117 | unsigned long cs : 1; /* Common-Segment Bit */ | ||
118 | unsigned long tt : 2; /* Table-Type Bits */ | ||
119 | unsigned long : 2; | ||
120 | }; | ||
121 | |||
122 | struct segment_entry_fc1 { | ||
123 | unsigned long sfaa : 44; /* Segment-Frame Absolute Address */ | ||
124 | unsigned long : 3; | ||
125 | unsigned long av : 1; /* ACCF-Validity Control */ | ||
126 | unsigned long acc: 4; /* Access-Control Bits */ | ||
127 | unsigned long f : 1; /* Fetch-Protection Bit */ | ||
128 | unsigned long fc : 1; /* Format-Control */ | ||
129 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
130 | unsigned long co : 1; /* Change-Recording Override */ | ||
131 | unsigned long : 2; | ||
132 | unsigned long i : 1; /* Segment-Invalid Bit */ | ||
133 | unsigned long cs : 1; /* Common-Segment Bit */ | ||
134 | unsigned long tt : 2; /* Table-Type Bits */ | ||
135 | unsigned long : 2; | ||
136 | }; | ||
137 | |||
138 | union segment_table_entry { | ||
139 | unsigned long val; | ||
140 | struct segment_entry_fc0 fc0; | ||
141 | struct segment_entry_fc1 fc1; | ||
142 | struct { | ||
143 | unsigned long : 53; | ||
144 | unsigned long fc : 1; /* Format-Control */ | ||
145 | unsigned long : 4; | ||
146 | unsigned long i : 1; /* Segment-Invalid Bit */ | ||
147 | unsigned long cs : 1; /* Common-Segment Bit */ | ||
148 | unsigned long tt : 2; /* Table-Type Bits */ | ||
149 | unsigned long : 2; | ||
150 | }; | ||
151 | }; | ||
152 | |||
153 | enum { | ||
154 | TABLE_TYPE_SEGMENT = 0, | ||
155 | TABLE_TYPE_REGION3 = 1, | ||
156 | TABLE_TYPE_REGION2 = 2, | ||
157 | TABLE_TYPE_REGION1 = 3 | ||
158 | }; | ||
159 | |||
160 | union page_table_entry { | ||
161 | unsigned long val; | ||
162 | struct { | ||
163 | unsigned long pfra : 52; /* Page-Frame Real Address */ | ||
164 | unsigned long z : 1; /* Zero Bit */ | ||
165 | unsigned long i : 1; /* Page-Invalid Bit */ | ||
166 | unsigned long p : 1; /* DAT-Protection Bit */ | ||
167 | unsigned long co : 1; /* Change-Recording Override */ | ||
168 | unsigned long : 8; | ||
169 | }; | ||
170 | }; | ||
171 | |||
172 | /* | ||
173 | * union vaddress is used to easily decode a virtual address into its | ||
174 | * region-first index, region-second index etc. parts. | ||
175 | */ | ||
176 | union vaddress { | ||
177 | unsigned long addr; | ||
178 | struct { | ||
179 | unsigned long rfx : 11; | ||
180 | unsigned long rsx : 11; | ||
181 | unsigned long rtx : 11; | ||
182 | unsigned long sx : 11; | ||
183 | unsigned long px : 8; | ||
184 | unsigned long bx : 12; | ||
185 | }; | ||
186 | struct { | ||
187 | unsigned long rfx01 : 2; | ||
188 | unsigned long : 9; | ||
189 | unsigned long rsx01 : 2; | ||
190 | unsigned long : 9; | ||
191 | unsigned long rtx01 : 2; | ||
192 | unsigned long : 9; | ||
193 | unsigned long sx01 : 2; | ||
194 | unsigned long : 29; | ||
195 | }; | ||
196 | }; | ||
197 | |||
198 | /* | ||
199 | * raddress union which will contain the result (real or absolute address) | ||
200 | * after a page table walk. The rfaa, sfaa and pfra members exist so that | ||
201 | * the value of a region, segment or page table entry can simply be assigned to them. | ||
202 | */ | ||
203 | union raddress { | ||
204 | unsigned long addr; | ||
205 | unsigned long rfaa : 33; /* Region-Frame Absolute Address */ | ||
206 | unsigned long sfaa : 44; /* Segment-Frame Absolute Address */ | ||
207 | unsigned long pfra : 52; /* Page-Frame Real Address */ | ||
208 | }; | ||
209 | |||
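[Editorial aside: the two unions above do the heavy lifting of the table walk further down — vaddress splits a 64-bit guest virtual address into table indexes, raddress reassembles the frame address. Since the bitfield layout relies on s390 being big-endian, here is the same split written with explicit shifts as a standalone sanity check; this sketch is illustrative only and not part of the patch.]

	#include <stdio.h>

	int main(void)
	{
		unsigned long gva = 0x0000123456789abcUL;

		unsigned long rfx = (gva >> 53) & 0x7ff;  /* region-first index  */
		unsigned long rsx = (gva >> 42) & 0x7ff;  /* region-second index */
		unsigned long rtx = (gva >> 31) & 0x7ff;  /* region-third index  */
		unsigned long sx  = (gva >> 20) & 0x7ff;  /* segment index       */
		unsigned long px  = (gva >> 12) & 0xff;   /* page index          */
		unsigned long bx  = gva & 0xfff;          /* byte index          */

		printf("rfx=%lx rsx=%lx rtx=%lx sx=%lx px=%lx bx=%lx\n",
		       rfx, rsx, rtx, sx, px, bx);
		return 0;
	}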
210 | static int ipte_lock_count; | ||
211 | static DEFINE_MUTEX(ipte_mutex); | ||
212 | |||
213 | int ipte_lock_held(struct kvm_vcpu *vcpu) | ||
214 | { | ||
215 | union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control; | ||
216 | |||
217 | if (vcpu->arch.sie_block->eca & 1) | ||
218 | return ic->kh != 0; | ||
219 | return ipte_lock_count != 0; | ||
220 | } | ||
221 | |||
222 | static void ipte_lock_simple(struct kvm_vcpu *vcpu) | ||
223 | { | ||
224 | union ipte_control old, new, *ic; | ||
225 | |||
226 | mutex_lock(&ipte_mutex); | ||
227 | ipte_lock_count++; | ||
228 | if (ipte_lock_count > 1) | ||
229 | goto out; | ||
230 | ic = &vcpu->kvm->arch.sca->ipte_control; | ||
231 | do { | ||
232 | old = ACCESS_ONCE(*ic); | ||
233 | while (old.k) { | ||
234 | cond_resched(); | ||
235 | old = ACCESS_ONCE(*ic); | ||
236 | } | ||
237 | new = old; | ||
238 | new.k = 1; | ||
239 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); | ||
240 | out: | ||
241 | mutex_unlock(&ipte_mutex); | ||
242 | } | ||
243 | |||
244 | static void ipte_unlock_simple(struct kvm_vcpu *vcpu) | ||
245 | { | ||
246 | union ipte_control old, new, *ic; | ||
247 | |||
248 | mutex_lock(&ipte_mutex); | ||
249 | ipte_lock_count--; | ||
250 | if (ipte_lock_count) | ||
251 | goto out; | ||
252 | ic = &vcpu->kvm->arch.sca->ipte_control; | ||
253 | do { | ||
254 | new = old = ACCESS_ONCE(*ic); | ||
255 | new.k = 0; | ||
256 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); | ||
257 | if (!ipte_lock_count) | ||
258 | wake_up(&vcpu->kvm->arch.ipte_wq); | ||
259 | out: | ||
260 | mutex_unlock(&ipte_mutex); | ||
261 | } | ||
262 | |||
263 | static void ipte_lock_siif(struct kvm_vcpu *vcpu) | ||
264 | { | ||
265 | union ipte_control old, new, *ic; | ||
266 | |||
267 | ic = &vcpu->kvm->arch.sca->ipte_control; | ||
268 | do { | ||
269 | old = ACCESS_ONCE(*ic); | ||
270 | while (old.kg) { | ||
271 | cond_resched(); | ||
272 | old = ACCESS_ONCE(*ic); | ||
273 | } | ||
274 | new = old; | ||
275 | new.k = 1; | ||
276 | new.kh++; | ||
277 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); | ||
278 | } | ||
279 | |||
280 | static void ipte_unlock_siif(struct kvm_vcpu *vcpu) | ||
281 | { | ||
282 | union ipte_control old, new, *ic; | ||
283 | |||
284 | ic = &vcpu->kvm->arch.sca->ipte_control; | ||
285 | do { | ||
286 | new = old = ACCESS_ONCE(*ic); | ||
287 | new.kh--; | ||
288 | if (!new.kh) | ||
289 | new.k = 0; | ||
290 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); | ||
291 | if (!new.kh) | ||
292 | wake_up(&vcpu->kvm->arch.ipte_wq); | ||
293 | } | ||
294 | |||
295 | static void ipte_lock(struct kvm_vcpu *vcpu) | ||
296 | { | ||
297 | if (vcpu->arch.sie_block->eca & 1) | ||
298 | ipte_lock_siif(vcpu); | ||
299 | else | ||
300 | ipte_lock_simple(vcpu); | ||
301 | } | ||
302 | |||
303 | static void ipte_unlock(struct kvm_vcpu *vcpu) | ||
304 | { | ||
305 | if (vcpu->arch.sie_block->eca & 1) | ||
306 | ipte_unlock_siif(vcpu); | ||
307 | else | ||
308 | ipte_unlock_simple(vcpu); | ||
309 | } | ||
310 | |||
311 | static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu) | ||
312 | { | ||
313 | switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { | ||
314 | case PSW_AS_PRIMARY: | ||
315 | return vcpu->arch.sie_block->gcr[1]; | ||
316 | case PSW_AS_SECONDARY: | ||
317 | return vcpu->arch.sie_block->gcr[7]; | ||
318 | case PSW_AS_HOME: | ||
319 | return vcpu->arch.sie_block->gcr[13]; | ||
320 | } | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) | ||
325 | { | ||
326 | return kvm_read_guest(kvm, gpa, val, sizeof(*val)); | ||
327 | } | ||
328 | |||
329 | /** | ||
330 | * guest_translate - translate a guest virtual into a guest absolute address | ||
331 | * @vcpu: virtual cpu | ||
332 | * @gva: guest virtual address | ||
333 | * @gpa: points to where guest physical (absolute) address should be stored | ||
334 | * @write: indicates if access is a write access | ||
335 | * | ||
336 | * Translate a guest virtual address into a guest absolute address by means | ||
337 | * of dynamic address translation as specified by the architecture. | ||
338 | * If the resulting absolute address is not available in the configuration, | ||
339 | * an addressing exception is indicated and @gpa will not be changed. | ||
340 | * | ||
341 | * Returns: - zero on success; @gpa contains the resulting absolute address | ||
342 | * - a negative value if guest access failed due to e.g. broken | ||
343 | * guest mapping | ||
344 | * - a positive value if an access exception happened. In this case | ||
345 | * the returned value is the program interruption code as defined | ||
346 | * by the architecture | ||
347 | */ | ||
348 | static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, | ||
349 | unsigned long *gpa, int write) | ||
350 | { | ||
351 | union vaddress vaddr = {.addr = gva}; | ||
352 | union raddress raddr = {.addr = gva}; | ||
353 | union page_table_entry pte; | ||
354 | int dat_protection = 0; | ||
355 | union ctlreg0 ctlreg0; | ||
356 | unsigned long ptr; | ||
357 | int edat1, edat2; | ||
358 | union asce asce; | ||
359 | |||
360 | ctlreg0.val = vcpu->arch.sie_block->gcr[0]; | ||
361 | edat1 = ctlreg0.edat && test_vfacility(8); | ||
362 | edat2 = edat1 && test_vfacility(78); | ||
363 | asce.val = get_vcpu_asce(vcpu); | ||
364 | if (asce.r) | ||
365 | goto real_address; | ||
366 | ptr = asce.origin * 4096; | ||
367 | switch (asce.dt) { | ||
368 | case ASCE_TYPE_REGION1: | ||
369 | if (vaddr.rfx01 > asce.tl) | ||
370 | return PGM_REGION_FIRST_TRANS; | ||
371 | ptr += vaddr.rfx * 8; | ||
372 | break; | ||
373 | case ASCE_TYPE_REGION2: | ||
374 | if (vaddr.rfx) | ||
375 | return PGM_ASCE_TYPE; | ||
376 | if (vaddr.rsx01 > asce.tl) | ||
377 | return PGM_REGION_SECOND_TRANS; | ||
378 | ptr += vaddr.rsx * 8; | ||
379 | break; | ||
380 | case ASCE_TYPE_REGION3: | ||
381 | if (vaddr.rfx || vaddr.rsx) | ||
382 | return PGM_ASCE_TYPE; | ||
383 | if (vaddr.rtx01 > asce.tl) | ||
384 | return PGM_REGION_THIRD_TRANS; | ||
385 | ptr += vaddr.rtx * 8; | ||
386 | break; | ||
387 | case ASCE_TYPE_SEGMENT: | ||
388 | if (vaddr.rfx || vaddr.rsx || vaddr.rtx) | ||
389 | return PGM_ASCE_TYPE; | ||
390 | if (vaddr.sx01 > asce.tl) | ||
391 | return PGM_SEGMENT_TRANSLATION; | ||
392 | ptr += vaddr.sx * 8; | ||
393 | break; | ||
394 | } | ||
395 | switch (asce.dt) { | ||
396 | case ASCE_TYPE_REGION1: { | ||
397 | union region1_table_entry rfte; | ||
398 | |||
399 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
400 | return PGM_ADDRESSING; | ||
401 | if (deref_table(vcpu->kvm, ptr, &rfte.val)) | ||
402 | return -EFAULT; | ||
403 | if (rfte.i) | ||
404 | return PGM_REGION_FIRST_TRANS; | ||
405 | if (rfte.tt != TABLE_TYPE_REGION1) | ||
406 | return PGM_TRANSLATION_SPEC; | ||
407 | if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl) | ||
408 | return PGM_REGION_SECOND_TRANS; | ||
409 | if (edat1) | ||
410 | dat_protection |= rfte.p; | ||
411 | ptr = rfte.rto * 4096 + vaddr.rsx * 8; | ||
412 | } | ||
413 | /* fallthrough */ | ||
414 | case ASCE_TYPE_REGION2: { | ||
415 | union region2_table_entry rste; | ||
416 | |||
417 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
418 | return PGM_ADDRESSING; | ||
419 | if (deref_table(vcpu->kvm, ptr, &rste.val)) | ||
420 | return -EFAULT; | ||
421 | if (rste.i) | ||
422 | return PGM_REGION_SECOND_TRANS; | ||
423 | if (rste.tt != TABLE_TYPE_REGION2) | ||
424 | return PGM_TRANSLATION_SPEC; | ||
425 | if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl) | ||
426 | return PGM_REGION_THIRD_TRANS; | ||
427 | if (edat1) | ||
428 | dat_protection |= rste.p; | ||
429 | ptr = rste.rto * 4096 + vaddr.rtx * 8; | ||
430 | } | ||
431 | /* fallthrough */ | ||
432 | case ASCE_TYPE_REGION3: { | ||
433 | union region3_table_entry rtte; | ||
434 | |||
435 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
436 | return PGM_ADDRESSING; | ||
437 | if (deref_table(vcpu->kvm, ptr, &rtte.val)) | ||
438 | return -EFAULT; | ||
439 | if (rtte.i) | ||
440 | return PGM_REGION_THIRD_TRANS; | ||
441 | if (rtte.tt != TABLE_TYPE_REGION3) | ||
442 | return PGM_TRANSLATION_SPEC; | ||
443 | if (rtte.cr && asce.p && edat2) | ||
444 | return PGM_TRANSLATION_SPEC; | ||
445 | if (rtte.fc && edat2) { | ||
446 | dat_protection |= rtte.fc1.p; | ||
447 | raddr.rfaa = rtte.fc1.rfaa; | ||
448 | goto absolute_address; | ||
449 | } | ||
450 | if (vaddr.sx01 < rtte.fc0.tf) | ||
451 | return PGM_SEGMENT_TRANSLATION; | ||
452 | if (vaddr.sx01 > rtte.fc0.tl) | ||
453 | return PGM_SEGMENT_TRANSLATION; | ||
454 | if (edat1) | ||
455 | dat_protection |= rtte.fc0.p; | ||
456 | ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8; | ||
457 | } | ||
458 | /* fallthrough */ | ||
459 | case ASCE_TYPE_SEGMENT: { | ||
460 | union segment_table_entry ste; | ||
461 | |||
462 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
463 | return PGM_ADDRESSING; | ||
464 | if (deref_table(vcpu->kvm, ptr, &ste.val)) | ||
465 | return -EFAULT; | ||
466 | if (ste.i) | ||
467 | return PGM_SEGMENT_TRANSLATION; | ||
468 | if (ste.tt != TABLE_TYPE_SEGMENT) | ||
469 | return PGM_TRANSLATION_SPEC; | ||
470 | if (ste.cs && asce.p) | ||
471 | return PGM_TRANSLATION_SPEC; | ||
472 | if (ste.fc && edat1) { | ||
473 | dat_protection |= ste.fc1.p; | ||
474 | raddr.sfaa = ste.fc1.sfaa; | ||
475 | goto absolute_address; | ||
476 | } | ||
477 | dat_protection |= ste.fc0.p; | ||
478 | ptr = ste.fc0.pto * 2048 + vaddr.px * 8; | ||
479 | } | ||
480 | } | ||
481 | if (kvm_is_error_gpa(vcpu->kvm, ptr)) | ||
482 | return PGM_ADDRESSING; | ||
483 | if (deref_table(vcpu->kvm, ptr, &pte.val)) | ||
484 | return -EFAULT; | ||
485 | if (pte.i) | ||
486 | return PGM_PAGE_TRANSLATION; | ||
487 | if (pte.z) | ||
488 | return PGM_TRANSLATION_SPEC; | ||
489 | if (pte.co && !edat1) | ||
490 | return PGM_TRANSLATION_SPEC; | ||
491 | dat_protection |= pte.p; | ||
492 | raddr.pfra = pte.pfra; | ||
493 | real_address: | ||
494 | raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr); | ||
495 | absolute_address: | ||
496 | if (write && dat_protection) | ||
497 | return PGM_PROTECTION; | ||
498 | if (kvm_is_error_gpa(vcpu->kvm, raddr.addr)) | ||
499 | return PGM_ADDRESSING; | ||
500 | *gpa = raddr.addr; | ||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | static inline int is_low_address(unsigned long ga) | ||
505 | { | ||
506 | /* Check for address ranges 0..511 and 4096..4607 */ | ||
507 | return (ga & ~0x11fful) == 0; | ||
508 | } | ||
509 | |||
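[Editorial aside: the single mask test above covers both documented ranges because ~0x11ff clears every bit except the low nine bits (0..511) and bit 12 (0x1000), so the test passes exactly when no other bit of the address is set. A standalone check of that claim, illustrative only and not part of the patch:]

	#include <assert.h>

	static int is_low_address(unsigned long ga)
	{
		return (ga & ~0x11fful) == 0;
	}

	int main(void)
	{
		unsigned long ga;

		/* exhaustively compare the mask trick against the two ranges */
		for (ga = 0; ga < 0x3000; ga++) {
			int expected = (ga <= 511) || (ga >= 4096 && ga <= 4607);

			assert(is_low_address(ga) == expected);
		}
		return 0;
	}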
510 | static int low_address_protection_enabled(struct kvm_vcpu *vcpu) | ||
511 | { | ||
512 | union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; | ||
513 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
514 | union asce asce; | ||
515 | |||
516 | if (!ctlreg0.lap) | ||
517 | return 0; | ||
518 | asce.val = get_vcpu_asce(vcpu); | ||
519 | if (psw_bits(*psw).t && asce.p) | ||
520 | return 0; | ||
521 | return 1; | ||
522 | } | ||
523 | |||
524 | struct trans_exc_code_bits { | ||
525 | unsigned long addr : 52; /* Translation-exception Address */ | ||
526 | unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ | ||
527 | unsigned long : 7; | ||
528 | unsigned long b61 : 1; | ||
529 | unsigned long as : 2; /* ASCE Identifier */ | ||
530 | }; | ||
531 | |||
532 | enum { | ||
533 | FSI_UNKNOWN = 0, /* Unknown whether fetch or store */ | ||
534 | FSI_STORE = 1, /* Exception was due to store operation */ | ||
535 | FSI_FETCH = 2 /* Exception was due to fetch operation */ | ||
536 | }; | ||
537 | |||
538 | static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, | ||
539 | unsigned long *pages, unsigned long nr_pages, | ||
540 | int write) | ||
541 | { | ||
542 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; | ||
543 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
544 | struct trans_exc_code_bits *tec_bits; | ||
545 | int lap_enabled, rc; | ||
546 | |||
547 | memset(pgm, 0, sizeof(*pgm)); | ||
548 | tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; | ||
549 | tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; | ||
550 | tec_bits->as = psw_bits(*psw).as; | ||
551 | lap_enabled = low_address_protection_enabled(vcpu); | ||
552 | while (nr_pages) { | ||
553 | ga = kvm_s390_logical_to_effective(vcpu, ga); | ||
554 | tec_bits->addr = ga >> PAGE_SHIFT; | ||
555 | if (write && lap_enabled && is_low_address(ga)) { | ||
556 | pgm->code = PGM_PROTECTION; | ||
557 | return pgm->code; | ||
558 | } | ||
559 | ga &= PAGE_MASK; | ||
560 | if (psw_bits(*psw).t) { | ||
561 | rc = guest_translate(vcpu, ga, pages, write); | ||
562 | if (rc < 0) | ||
563 | return rc; | ||
564 | if (rc == PGM_PROTECTION) | ||
565 | tec_bits->b61 = 1; | ||
566 | if (rc) | ||
567 | pgm->code = rc; | ||
568 | } else { | ||
569 | *pages = kvm_s390_real_to_abs(vcpu, ga); | ||
570 | if (kvm_is_error_gpa(vcpu->kvm, *pages)) | ||
571 | pgm->code = PGM_ADDRESSING; | ||
572 | } | ||
573 | if (pgm->code) | ||
574 | return pgm->code; | ||
575 | ga += PAGE_SIZE; | ||
576 | pages++; | ||
577 | nr_pages--; | ||
578 | } | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | ||
583 | unsigned long len, int write) | ||
584 | { | ||
585 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
586 | unsigned long _len, nr_pages, gpa, idx; | ||
587 | unsigned long pages_array[2]; | ||
588 | unsigned long *pages; | ||
589 | int need_ipte_lock; | ||
590 | union asce asce; | ||
591 | int rc; | ||
592 | |||
593 | if (!len) | ||
594 | return 0; | ||
595 | /* Access register mode is not supported yet. */ | ||
596 | if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) | ||
597 | return -EOPNOTSUPP; | ||
598 | nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; | ||
599 | pages = pages_array; | ||
600 | if (nr_pages > ARRAY_SIZE(pages_array)) | ||
601 | pages = vmalloc(nr_pages * sizeof(unsigned long)); | ||
602 | if (!pages) | ||
603 | return -ENOMEM; | ||
604 | asce.val = get_vcpu_asce(vcpu); | ||
605 | need_ipte_lock = psw_bits(*psw).t && !asce.r; | ||
606 | if (need_ipte_lock) | ||
607 | ipte_lock(vcpu); | ||
608 | rc = guest_page_range(vcpu, ga, pages, nr_pages, write); | ||
609 | for (idx = 0; idx < nr_pages && !rc; idx++) { | ||
610 | gpa = *(pages + idx) + (ga & ~PAGE_MASK); | ||
611 | _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); | ||
612 | if (write) | ||
613 | rc = kvm_write_guest(vcpu->kvm, gpa, data, _len); | ||
614 | else | ||
615 | rc = kvm_read_guest(vcpu->kvm, gpa, data, _len); | ||
616 | len -= _len; | ||
617 | ga += _len; | ||
618 | data += _len; | ||
619 | } | ||
620 | if (need_ipte_lock) | ||
621 | ipte_unlock(vcpu); | ||
622 | if (nr_pages > ARRAY_SIZE(pages_array)) | ||
623 | vfree(pages); | ||
624 | return rc; | ||
625 | } | ||
626 | |||
627 | int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | ||
628 | void *data, unsigned long len, int write) | ||
629 | { | ||
630 | unsigned long _len, gpa; | ||
631 | int rc = 0; | ||
632 | |||
633 | while (len && !rc) { | ||
634 | gpa = kvm_s390_real_to_abs(vcpu, gra); | ||
635 | _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); | ||
636 | if (write) | ||
637 | rc = write_guest_abs(vcpu, gpa, data, _len); | ||
638 | else | ||
639 | rc = read_guest_abs(vcpu, gpa, data, _len); | ||
640 | len -= _len; | ||
641 | gra += _len; | ||
642 | data += _len; | ||
643 | } | ||
644 | return rc; | ||
645 | } | ||
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 374a439ccc60..1079c8fc6d0d 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * access guest memory | 2 | * access guest memory |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008, 2009 | 4 | * Copyright IBM Corp. 2008, 2014 |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License (version 2 only) | 7 | * it under the terms of the GNU General Public License (version 2 only) |
@@ -15,100 +15,315 @@ | |||
15 | 15 | ||
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/kvm_host.h> | 17 | #include <linux/kvm_host.h> |
18 | #include <asm/uaccess.h> | 18 | #include <linux/uaccess.h> |
19 | #include <linux/ptrace.h> | ||
19 | #include "kvm-s390.h" | 20 | #include "kvm-s390.h" |
20 | 21 | ||
21 | /* Convert real to absolute address by applying the prefix of the CPU */ | 22 | /** |
23 | * kvm_s390_real_to_abs - convert guest real address to guest absolute address | ||
24 | * @vcpu: guest virtual cpu | ||
25 | * @gra: guest real address | ||
26 | * | ||
27 | * Returns the guest absolute address that corresponds to the passed guest real | ||
28 | * address @gra of a virtual guest cpu by applying its prefix. | ||
29 | */ | ||
22 | static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, | 30 | static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, |
23 | unsigned long gaddr) | 31 | unsigned long gra) |
24 | { | 32 | { |
25 | unsigned long prefix = vcpu->arch.sie_block->prefix; | 33 | unsigned long prefix = vcpu->arch.sie_block->prefix; |
26 | if (gaddr < 2 * PAGE_SIZE) | 34 | |
27 | gaddr += prefix; | 35 | if (gra < 2 * PAGE_SIZE) |
28 | else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE) | 36 | gra += prefix; |
29 | gaddr -= prefix; | 37 | else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE) |
30 | return gaddr; | 38 | gra -= prefix; |
39 | return gra; | ||
31 | } | 40 | } |
32 | 41 | ||
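[Editorial aside: the prefix mechanism swaps the vcpu's first two pages (real addresses 0..8191) with the 8KB block at the prefix address and leaves everything else untouched. A standalone sketch of that mapping, with a prefix value of 0x20000 assumed purely for illustration:]

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static unsigned long real_to_abs(unsigned long gra, unsigned long prefix)
	{
		if (gra < 2 * PAGE_SIZE)
			gra += prefix;
		else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
			gra -= prefix;
		return gra;
	}

	int main(void)
	{
		unsigned long prefix = 0x20000;	/* assumed prefix, for illustration */

		printf("%lx\n", real_to_abs(0x0000, prefix));  /* -> 0x20000: low pages map to prefix area */
		printf("%lx\n", real_to_abs(0x20000, prefix)); /* -> 0x0: prefix area maps to low pages */
		printf("%lx\n", real_to_abs(0x50000, prefix)); /* -> 0x50000: everything else is 1:1 */
		return 0;
	}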
33 | static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu, | 42 | /** |
34 | void __user *gptr, | 43 | * kvm_s390_logical_to_effective - convert guest logical to effective address |
35 | int prefixing) | 44 | * @vcpu: guest virtual cpu |
45 | * @ga: guest logical address | ||
46 | * | ||
47 | * Convert a guest vcpu logical address to a guest vcpu effective address by | ||
48 | * applying the rules of the vcpu's addressing mode defined by PSW bits 31 | ||
49 | * and 32 (extended/basic addressing mode). | ||
50 | * | ||
51 | * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing | ||
52 | * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode) | ||
53 | * of @ga will be zeroed and the remaining bits will be returned. | ||
54 | */ | ||
55 | static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu, | ||
56 | unsigned long ga) | ||
36 | { | 57 | { |
37 | unsigned long gaddr = (unsigned long) gptr; | 58 | psw_t *psw = &vcpu->arch.sie_block->gpsw; |
38 | unsigned long uaddr; | 59 | |
39 | 60 | if (psw_bits(*psw).eaba == PSW_AMODE_64BIT) | |
40 | if (prefixing) | 61 | return ga; |
41 | gaddr = kvm_s390_real_to_abs(vcpu, gaddr); | 62 | if (psw_bits(*psw).eaba == PSW_AMODE_31BIT) |
42 | uaddr = gmap_fault(gaddr, vcpu->arch.gmap); | 63 | return ga & ((1UL << 31) - 1); |
43 | if (IS_ERR_VALUE(uaddr)) | 64 | return ga & ((1UL << 24) - 1); |
44 | uaddr = -EFAULT; | ||
45 | return (void __user *)uaddr; | ||
46 | } | 65 | } |
47 | 66 | ||
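[Editorial aside: a standalone sketch of the wrap-around masking just described. The amode encoding used here (0 = 24-bit, 1 = 31-bit, 3 = 64-bit) mirrors the PSW EA/BA bits but is hard-coded for illustration; it is not the kernel's psw_bits accessor.]

	#include <stdio.h>

	static unsigned long logical_to_effective(unsigned long ga, int amode)
	{
		if (amode == 3)			/* 64-bit: no wrap */
			return ga;
		if (amode == 1)			/* 31-bit: keep low 31 bits */
			return ga & ((1UL << 31) - 1);
		return ga & ((1UL << 24) - 1);	/* 24-bit: keep low 24 bits */
	}

	int main(void)
	{
		unsigned long ga = 0xdeadbeefcafeUL;

		printf("24-bit: %lx\n", logical_to_effective(ga, 0)); /* efcafe */
		printf("31-bit: %lx\n", logical_to_effective(ga, 1)); /* 3eefcafe */
		printf("64-bit: %lx\n", logical_to_effective(ga, 3)); /* unchanged */
		return 0;
	}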
48 | #define get_guest(vcpu, x, gptr) \ | 67 | /* |
49 | ({ \ | 68 | * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions |
50 | __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ | 69 | * which shall only be used to access the lowcore of a vcpu. |
51 | int __mask = sizeof(__typeof__(*(gptr))) - 1; \ | 70 | * These functions should be used for e.g. interrupt handlers where no |
52 | int __ret; \ | 71 | * guest memory access protection facilities, like key or low address |
53 | \ | 72 | * protection, are applicable. |
54 | if (IS_ERR((void __force *)__uptr)) { \ | 73 | * At a later point guest vcpu lowcore access should happen via pinned |
55 | __ret = PTR_ERR((void __force *)__uptr); \ | 74 | * prefix pages, so that these pages can be accessed directly via the |
56 | } else { \ | 75 | * kernel mapping. All of these *_lc functions can be removed then. |
57 | BUG_ON((unsigned long)__uptr & __mask); \ | 76 | */ |
58 | __ret = get_user(x, __uptr); \ | ||
59 | } \ | ||
60 | __ret; \ | ||
61 | }) | ||
62 | 77 | ||
63 | #define put_guest(vcpu, x, gptr) \ | 78 | /** |
79 | * put_guest_lc - write a simple variable to a guest vcpu's lowcore | ||
80 | * @vcpu: virtual cpu | ||
81 | * @x: value to copy to guest | ||
82 | * @gra: vcpu's destination guest real address | ||
83 | * | ||
84 | * Copies a simple value from kernel space to a guest vcpu's lowcore. | ||
85 | * The size of the variable may be 1, 2, 4 or 8 bytes. The destination | ||
86 | * must be located in the vcpu's lowcore. Otherwise the result is undefined. | ||
87 | * | ||
88 | * Returns zero on success or -EFAULT on error. | ||
89 | * | ||
90 | * Note: an error indicates that either the kernel is out of memory or | ||
91 | * the guest memory mapping is broken. In any case the best solution | ||
92 | * would be to terminate the guest. | ||
93 | * It is wrong to inject a guest exception. | ||
94 | */ | ||
95 | #define put_guest_lc(vcpu, x, gra) \ | ||
64 | ({ \ | 96 | ({ \ |
65 | __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ | 97 | struct kvm_vcpu *__vcpu = (vcpu); \ |
66 | int __mask = sizeof(__typeof__(*(gptr))) - 1; \ | 98 | __typeof__(*(gra)) __x = (x); \ |
67 | int __ret; \ | 99 | unsigned long __gpa; \ |
68 | \ | 100 | \ |
69 | if (IS_ERR((void __force *)__uptr)) { \ | 101 | __gpa = (unsigned long)(gra); \ |
70 | __ret = PTR_ERR((void __force *)__uptr); \ | 102 | __gpa += __vcpu->arch.sie_block->prefix; \ |
71 | } else { \ | 103 | kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x)); \ |
72 | BUG_ON((unsigned long)__uptr & __mask); \ | ||
73 | __ret = put_user(x, __uptr); \ | ||
74 | } \ | ||
75 | __ret; \ | ||
76 | }) | 104 | }) |
77 | 105 | ||
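[Editorial aside: a hedged usage sketch for put_guest_lc(). __LC_EXT_INT_CODE is the lowcore offset of the external-interrupt code field and the pointer cast determines the store width (here 2 bytes); the surrounding function and the interrupt code 0x1004 (clock comparator) are assumptions made for illustration, not a verbatim excerpt from the patch.]

	/* hypothetical delivery helper, sketched for illustration only */
	static int deliver_ckc_irq_sketch(struct kvm_vcpu *vcpu)
	{
		int rc;

		/* 0x1004: assumed clock-comparator external-interrupt code */
		rc = put_guest_lc(vcpu, 0x1004, (u16 *)__LC_EXT_INT_CODE);
		if (rc)
			/* per the note above: report the error, don't inject */
			return rc;
		return 0;
	}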
78 | static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to, | 106 | /** |
79 | unsigned long from, unsigned long len, | 107 | * write_guest_lc - copy data from kernel space to guest vcpu's lowcore |
80 | int to_guest, int prefixing) | 108 | * @vcpu: virtual cpu |
109 | * @gra: vcpu's destination guest real address | ||
110 | * @data: source address in kernel space | ||
111 | * @len: number of bytes to copy | ||
112 | * | ||
113 | * Copy data from kernel space to guest vcpu's lowcore. The entire range must | ||
114 | * be located within the vcpu's lowcore, otherwise the result is undefined. | ||
115 | * | ||
116 | * Returns zero on success or -EFAULT on error. | ||
117 | * | ||
118 | * Note: an error indicates that either the kernel is out of memory or | ||
119 | * the guest memory mapping is broken. In any case the best solution | ||
120 | * would be to terminate the guest. | ||
121 | * It is wrong to inject a guest exception. | ||
122 | */ | ||
123 | static inline __must_check | ||
124 | int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | ||
125 | unsigned long len) | ||
126 | { | ||
127 | unsigned long gpa = gra + vcpu->arch.sie_block->prefix; | ||
128 | |||
129 | return kvm_write_guest(vcpu->kvm, gpa, data, len); | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * read_guest_lc - copy data from guest vcpu's lowcore to kernel space | ||
134 | * @vcpu: virtual cpu | ||
135 | * @gra: vcpu's source guest real address | ||
136 | * @data: destination address in kernel space | ||
137 | * @len: number of bytes to copy | ||
138 | * | ||
139 | * Copy data from guest vcpu's lowcore to kernel space. The entire range must | ||
140 | * be located within the vcpu's lowcore, otherwise the result is undefined. | ||
141 | * | ||
142 | * Returns zero on success or -EFAULT on error. | ||
143 | * | ||
144 | * Note: an error indicates that either the kernel is out of memory or | ||
145 | * the guest memory mapping is broken. In any case the best solution | ||
146 | * would be to terminate the guest. | ||
147 | * It is wrong to inject a guest exception. | ||
148 | */ | ||
149 | static inline __must_check | ||
150 | int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | ||
151 | unsigned long len) | ||
152 | { | ||
153 | unsigned long gpa = gra + vcpu->arch.sie_block->prefix; | ||
154 | |||
155 | return kvm_read_guest(vcpu->kvm, gpa, data, len); | ||
156 | } | ||
157 | |||
158 | int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | ||
159 | unsigned long len, int write); | ||
160 | |||
161 | int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | ||
162 | void *data, unsigned long len, int write); | ||
163 | |||
164 | /** | ||
165 | * write_guest - copy data from kernel space to guest space | ||
166 | * @vcpu: virtual cpu | ||
167 | * @ga: guest address | ||
168 | * @data: source address in kernel space | ||
169 | * @len: number of bytes to copy | ||
170 | * | ||
171 | * Copy @len bytes from @data (kernel space) to @ga (guest address). | ||
172 | * In order to copy data to guest space the PSW of the vcpu is inspected: | ||
173 | * If DAT is off data will be copied to guest real or absolute memory. | ||
174 | * If DAT is on data will be copied to the address space as specified by | ||
175 | * the address space bits of the PSW: | ||
176 | * Primary, secondary or home space (access register mode is currently not | ||
177 | * implemented). | ||
178 | * The addressing mode of the PSW is also inspected, so that address wrap | ||
179 | * around is taken into account for 24-, 31- and 64-bit addressing mode, | ||
180 | * if the data to be copied crosses page boundaries in guest address space. | ||
181 | * In addition also low address and DAT protection are inspected before | ||
182 | * copying any data (key protection is currently not implemented). | ||
183 | * | ||
184 | * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu. | ||
185 | * In case of an access exception (e.g. protection exception) pgm will contain | ||
186 | * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()' | ||
187 | * will inject a correct exception into the guest. | ||
188 | * If no access exception happened, the contents of pgm are undefined when | ||
189 | * this function returns. | ||
190 | * | ||
191 | * Returns: - zero on success | ||
192 | * - a negative value if e.g. the guest mapping is broken or in | ||
193 | * case of out-of-memory. In this case the contents of pgm are | ||
194 | * undefined. Also parts of @data may have been copied to guest | ||
195 | * space. | ||
196 | * - a positive value if an access exception happened. In this case | ||
197 | * the returned value is the program interruption code and the | ||
198 | * contents of pgm may be used to inject an exception into the | ||
199 | * guest. No data has been copied to guest space. | ||
200 | * | ||
201 | * Note: in case an access exception is recognized no data has been copied to | ||
202 | * guest space (this is also true if the data to be copied would cross | ||
203 | * one or more page boundaries in guest space). | ||
204 | * Therefore this function may be used for nullifying and suppressing | ||
205 | * instruction emulation. | ||
206 | * It may also be used for terminating instructions, where it is | ||
207 | * undefined whether data has been changed in guest space in case of an exception. | ||
208 | */ | ||
209 | static inline __must_check | ||
210 | int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | ||
211 | unsigned long len) | ||
212 | { | ||
213 | return access_guest(vcpu, ga, data, len, 1); | ||
214 | } | ||
215 | |||
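[Editorial aside: the three-way return contract documented above typically leads to calling code of the following shape. This is a hedged sketch of the calling convention, not part of the patch; the handler is hypothetical and the injection helper's name and signature follow the comment above but are assumptions here.]

	/* hypothetical handler, sketched for illustration only */
	static int handle_some_store_sketch(struct kvm_vcpu *vcpu, unsigned long ga,
					    void *buf, unsigned long len)
	{
		int rc = write_guest(vcpu, ga, buf, len);

		if (rc < 0)	/* broken mapping / out of memory: report to userspace */
			return rc;
		if (rc > 0)	/* access exception: rc is the PIC, pgm has been filled in */
			return kvm_s390_inject_prog_vcpu(vcpu, &vcpu->arch.pgm);
		return 0;	/* data fully copied to guest space */
	}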
216 | /** | ||
217 | * read_guest - copy data from guest space to kernel space | ||
218 | * @vcpu: virtual cpu | ||
219 | * @ga: guest address | ||
220 | * @data: destination address in kernel space | ||
221 | * @len: number of bytes to copy | ||
222 | * | ||
223 | * Copy @len bytes from @ga (guest address) to @data (kernel space). | ||
224 | * | ||
225 | * The behaviour of read_guest is identical to write_guest, except that | ||
226 | * data will be copied from guest space to kernel space. | ||
227 | */ | ||
228 | static inline __must_check | ||
229 | int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | ||
230 | unsigned long len) | ||
231 | { | ||
232 | return access_guest(vcpu, ga, data, len, 0); | ||
233 | } | ||
234 | |||
235 | /** | ||
236 | * write_guest_abs - copy data from kernel space to guest space absolute | ||
237 | * @vcpu: virtual cpu | ||
238 | * @gpa: guest physical (absolute) address | ||
239 | * @data: source address in kernel space | ||
240 | * @len: number of bytes to copy | ||
241 | * | ||
242 | * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address). | ||
243 | * It is up to the caller to ensure that the entire guest memory range is | ||
244 | * valid memory before calling this function. | ||
245 | * Guest low address and key protection are not checked. | ||
246 | * | ||
247 | * Returns zero on success or -EFAULT on error. | ||
248 | * | ||
249 | * If an error occurs data may have been copied partially to guest memory. | ||
250 | */ | ||
251 | static inline __must_check | ||
252 | int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, | ||
253 | unsigned long len) | ||
254 | { | ||
255 | return kvm_write_guest(vcpu->kvm, gpa, data, len); | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * read_guest_abs - copy data from guest space absolute to kernel space | ||
260 | * @vcpu: virtual cpu | ||
261 | * @gpa: guest physical (absolute) address | ||
262 | * @data: destination address in kernel space | ||
263 | * @len: number of bytes to copy | ||
264 | * | ||
265 | * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space). | ||
266 | * It is up to the caller to ensure that the entire guest memory range is | ||
267 | * valid memory before calling this function. | ||
268 | * Guest key protection is not checked. | ||
269 | * | ||
270 | * Returns zero on success or -EFAULT on error. | ||
271 | * | ||
272 | * If an error occurs data may have been copied partially to kernel space. | ||
273 | */ | ||
274 | static inline __must_check | ||
275 | int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, | ||
276 | unsigned long len) | ||
277 | { | ||
278 | return kvm_read_guest(vcpu->kvm, gpa, data, len); | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * write_guest_real - copy data from kernel space to guest space real | ||
283 | * @vcpu: virtual cpu | ||
284 | * @gra: guest real address | ||
285 | * @data: source address in kernel space | ||
286 | * @len: number of bytes to copy | ||
287 | * | ||
288 | * Copy @len bytes from @data (kernel space) to @gra (guest real address). | ||
289 | * It is up to the caller to ensure that the entire guest memory range is | ||
290 | * valid memory before calling this function. | ||
291 | * Guest low address and key protection are not checked. | ||
292 | * | ||
293 | * Returns zero on success or -EFAULT on error. | ||
294 | * | ||
295 | * If an error occurs data may have been copied partially to guest memory. | ||
296 | */ | ||
297 | static inline __must_check | ||
298 | int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | ||
299 | unsigned long len) | ||
300 | { | ||
301 | return access_guest_real(vcpu, gra, data, len, 1); | ||
302 | } | ||
303 | |||
304 | /** | ||
305 | * read_guest_real - copy data from guest space real to kernel space | ||
306 | * @vcpu: virtual cpu | ||
307 | * @gra: guest real address | ||
308 | * @data: destination address in kernel space | ||
309 | * @len: number of bytes to copy | ||
310 | * | ||
311 | * Copy @len bytes from @gra (guest real address) to @data (kernel space). | ||
312 | * It is up to the caller to ensure that the entire guest memory range is | ||
313 | * valid memory before calling this function. | ||
314 | * Guest key protection is not checked. | ||
315 | * | ||
316 | * Returns zero on success or -EFAULT on error. | ||
317 | * | ||
318 | * If an error occurs data may have been copied partially to kernel space. | ||
319 | */ | ||
320 | static inline __must_check | ||
321 | int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | ||
322 | unsigned long len) | ||
81 | { | 323 | { |
82 | unsigned long _len, rc; | 324 | return access_guest_real(vcpu, gra, data, len, 0); |
83 | void __user *uptr; | ||
84 | |||
85 | while (len) { | ||
86 | uptr = to_guest ? (void __user *)to : (void __user *)from; | ||
87 | uptr = __gptr_to_uptr(vcpu, uptr, prefixing); | ||
88 | if (IS_ERR((void __force *)uptr)) | ||
89 | return -EFAULT; | ||
90 | _len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1)); | ||
91 | _len = min(_len, len); | ||
92 | if (to_guest) | ||
93 | rc = copy_to_user((void __user *) uptr, (void *)from, _len); | ||
94 | else | ||
95 | rc = copy_from_user((void *)to, (void __user *)uptr, _len); | ||
96 | if (rc) | ||
97 | return -EFAULT; | ||
98 | len -= _len; | ||
99 | from += _len; | ||
100 | to += _len; | ||
101 | } | ||
102 | return 0; | ||
103 | } | 325 | } |
104 | 326 | ||
105 | #define copy_to_guest(vcpu, to, from, size) \ | 327 | int ipte_lock_held(struct kvm_vcpu *vcpu); |
106 | __copy_guest(vcpu, to, (unsigned long)from, size, 1, 1) | ||
107 | #define copy_from_guest(vcpu, to, from, size) \ | ||
108 | __copy_guest(vcpu, (unsigned long)to, from, size, 0, 1) | ||
109 | #define copy_to_guest_absolute(vcpu, to, from, size) \ | ||
110 | __copy_guest(vcpu, to, (unsigned long)from, size, 1, 0) | ||
111 | #define copy_from_guest_absolute(vcpu, to, from, size) \ | ||
112 | __copy_guest(vcpu, (unsigned long)to, from, size, 0, 0) | ||
113 | 328 | ||
114 | #endif /* __KVM_S390_GACCESS_H */ | 329 | #endif /* __KVM_S390_GACCESS_H */ |
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c new file mode 100644 index 000000000000..757ccef62fd5 --- /dev/null +++ b/arch/s390/kvm/guestdbg.c | |||
@@ -0,0 +1,481 @@ | |||
1 | /* | ||
2 | * kvm guest debug support | ||
3 | * | ||
4 | * Copyright IBM Corp. 2014 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com> | ||
11 | */ | ||
12 | #include <linux/kvm_host.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include "kvm-s390.h" | ||
15 | #include "gaccess.h" | ||
16 | |||
17 | /* | ||
18 | * Extends the address range given by *start and *stop to include the address | ||
19 | * range starting at estart with length len. Takes care of overflowing | ||
20 | * intervals and tries to minimize the overall interval size. | ||
21 | */ | ||
22 | static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len) | ||
23 | { | ||
24 | u64 estop; | ||
25 | |||
26 | if (len > 0) | ||
27 | len--; | ||
28 | else | ||
29 | len = 0; | ||
30 | |||
31 | estop = estart + len; | ||
32 | |||
33 | /* 0-0 range represents "not set" */ | ||
34 | if ((*start == 0) && (*stop == 0)) { | ||
35 | *start = estart; | ||
36 | *stop = estop; | ||
37 | } else if (*start <= *stop) { | ||
38 | /* increase the existing range */ | ||
39 | if (estart < *start) | ||
40 | *start = estart; | ||
41 | if (estop > *stop) | ||
42 | *stop = estop; | ||
43 | } else { | ||
44 | /* "overflowing" interval, whereby *stop > *start */ | ||
45 | if (estart <= *stop) { | ||
46 | if (estop > *stop) | ||
47 | *stop = estop; | ||
48 | } else if (estop > *start) { | ||
49 | if (estart < *start) | ||
50 | *start = estart; | ||
51 | } | ||
52 | /* minimize the range */ | ||
53 | else if ((estop - *stop) < (*start - estart)) | ||
54 | *stop = estop; | ||
55 | else | ||
56 | *start = estart; | ||
57 | } | ||
58 | } | ||
59 | |||
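[Editorial aside: interval logic like this is easy to get wrong, so here is the same function dropped into a standalone harness with two worked cases; illustrative only, not part of the patch.]

	#include <stdio.h>

	typedef unsigned long long u64;

	static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
	{
		u64 estop;

		if (len > 0)
			len--;
		else
			len = 0;

		estop = estart + len;

		/* 0-0 range represents "not set" */
		if ((*start == 0) && (*stop == 0)) {
			*start = estart;
			*stop = estop;
		} else if (*start <= *stop) {
			/* grow a normal (non-wrapping) range */
			if (estart < *start)
				*start = estart;
			if (estop > *stop)
				*stop = estop;
		} else {
			/* "overflowing" interval, whereby *start > *stop */
			if (estart <= *stop) {
				if (estop > *stop)
					*stop = estop;
			} else if (estop > *start) {
				if (estart < *start)
					*start = estart;
			} else if ((estop - *stop) < (*start - estart))
				*stop = estop;
			else
				*start = estart;
		}
	}

	int main(void)
	{
		u64 start = 0, stop = 0;

		extend_address_range(&start, &stop, 0x1000, 16);
		printf("[%llx, %llx]\n", start, stop);	/* [1000, 100f] */
		extend_address_range(&start, &stop, 0x2000, 16);
		printf("[%llx, %llx]\n", start, stop);	/* [1000, 200f] */
		return 0;
	}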
60 | #define MAX_INST_SIZE 6 | ||
61 | |||
62 | static void enable_all_hw_bp(struct kvm_vcpu *vcpu) | ||
63 | { | ||
64 | unsigned long start, len; | ||
65 | u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; | ||
66 | u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; | ||
67 | u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; | ||
68 | int i; | ||
69 | |||
70 | if (vcpu->arch.guestdbg.nr_hw_bp <= 0 || | ||
71 | vcpu->arch.guestdbg.hw_bp_info == NULL) | ||
72 | return; | ||
73 | |||
74 | /* | ||
75 | * If the guest is not interested in branching events, we can safely | ||
76 | * limit them to the PER address range. | ||
77 | */ | ||
78 | if (!(*cr9 & PER_EVENT_BRANCH)) | ||
79 | *cr9 |= PER_CONTROL_BRANCH_ADDRESS; | ||
80 | *cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH; | ||
81 | |||
82 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { | ||
83 | start = vcpu->arch.guestdbg.hw_bp_info[i].addr; | ||
84 | len = vcpu->arch.guestdbg.hw_bp_info[i].len; | ||
85 | |||
86 | /* | ||
87 | * The instruction in front of the desired bp has to | ||
88 | * report instruction-fetching events | ||
89 | */ | ||
90 | if (start < MAX_INST_SIZE) { | ||
91 | len += start; | ||
92 | start = 0; | ||
93 | } else { | ||
94 | start -= MAX_INST_SIZE; | ||
95 | len += MAX_INST_SIZE; | ||
96 | } | ||
97 | |||
98 | extend_address_range(cr10, cr11, start, len); | ||
99 | } | ||
100 | } | ||
101 | |||
102 | static void enable_all_hw_wp(struct kvm_vcpu *vcpu) | ||
103 | { | ||
104 | unsigned long start, len; | ||
105 | u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; | ||
106 | u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; | ||
107 | u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; | ||
108 | int i; | ||
109 | |||
110 | if (vcpu->arch.guestdbg.nr_hw_wp <= 0 || | ||
111 | vcpu->arch.guestdbg.hw_wp_info == NULL) | ||
112 | return; | ||
113 | |||
114 | /* if host uses storage alteration for special address | ||
115 | * spaces, enable all events and give all to the guest */ | ||
116 | if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) { | ||
117 | *cr9 &= ~PER_CONTROL_ALTERATION; | ||
118 | *cr10 = 0; | ||
119 | *cr11 = PSW_ADDR_INSN; | ||
120 | } else { | ||
121 | *cr9 &= ~PER_CONTROL_ALTERATION; | ||
122 | *cr9 |= PER_EVENT_STORE; | ||
123 | |||
124 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { | ||
125 | start = vcpu->arch.guestdbg.hw_wp_info[i].addr; | ||
126 | len = vcpu->arch.guestdbg.hw_wp_info[i].len; | ||
127 | |||
128 | extend_address_range(cr10, cr11, start, len); | ||
129 | } | ||
130 | } | ||
131 | } | ||
132 | |||
133 | void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu) | ||
134 | { | ||
135 | vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0]; | ||
136 | vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9]; | ||
137 | vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10]; | ||
138 | vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11]; | ||
139 | } | ||
140 | |||
141 | void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu) | ||
142 | { | ||
143 | vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0; | ||
144 | vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9; | ||
145 | vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10; | ||
146 | vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11; | ||
147 | } | ||
148 | |||
149 | void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu) | ||
150 | { | ||
151 | /* | ||
152 | * TODO: apply these settings only if the guest psw has per enabled, otherwise use 0s! | ||
153 | * This reduces the amount of reported events. | ||
154 | * Need to intercept all psw changes! | ||
155 | */ | ||
156 | |||
157 | if (guestdbg_sstep_enabled(vcpu)) { | ||
158 | /* disable timer (clock-comparator) interrupts */ | ||
159 | vcpu->arch.sie_block->gcr[0] &= ~0x800ul; | ||
160 | vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; | ||
161 | vcpu->arch.sie_block->gcr[10] = 0; | ||
162 | vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN; | ||
163 | } | ||
164 | |||
165 | if (guestdbg_hw_bp_enabled(vcpu)) { | ||
166 | enable_all_hw_bp(vcpu); | ||
167 | enable_all_hw_wp(vcpu); | ||
168 | } | ||
169 | |||
170 | /* TODO: Instruction-fetching-nullification not allowed for now */ | ||
171 | if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION) | ||
172 | vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION; | ||
173 | } | ||
174 | |||
175 | #define MAX_WP_SIZE 100 | ||
176 | |||
177 | static int __import_wp_info(struct kvm_vcpu *vcpu, | ||
178 | struct kvm_hw_breakpoint *bp_data, | ||
179 | struct kvm_hw_wp_info_arch *wp_info) | ||
180 | { | ||
181 | int ret = 0; | ||
182 | wp_info->len = bp_data->len; | ||
183 | wp_info->addr = bp_data->addr; | ||
184 | wp_info->phys_addr = bp_data->phys_addr; | ||
185 | wp_info->old_data = NULL; | ||
186 | |||
187 | if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE) | ||
188 | return -EINVAL; | ||
189 | |||
190 | wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL); | ||
191 | if (!wp_info->old_data) | ||
192 | return -ENOMEM; | ||
193 | /* try to backup the original value */ | ||
194 | ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data, | ||
195 | wp_info->len); | ||
196 | if (ret) { | ||
197 | kfree(wp_info->old_data); | ||
198 | wp_info->old_data = NULL; | ||
199 | } | ||
200 | |||
201 | return ret; | ||
202 | } | ||
203 | |||
204 | #define MAX_BP_COUNT 50 | ||
205 | |||
206 | int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu, | ||
207 | struct kvm_guest_debug *dbg) | ||
208 | { | ||
209 | int ret = 0, nr_wp = 0, nr_bp = 0, i, size; | ||
210 | struct kvm_hw_breakpoint *bp_data = NULL; | ||
211 | struct kvm_hw_wp_info_arch *wp_info = NULL; | ||
212 | struct kvm_hw_bp_info_arch *bp_info = NULL; | ||
213 | |||
214 | if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp) | ||
215 | return 0; | ||
216 | else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT) | ||
217 | return -EINVAL; | ||
218 | |||
219 | size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint); | ||
220 | bp_data = kmalloc(size, GFP_KERNEL); | ||
221 | if (!bp_data) { | ||
222 | ret = -ENOMEM; | ||
223 | goto error; | ||
224 | } | ||
225 | |||
226 | ret = copy_from_user(bp_data, dbg->arch.hw_bp, size) ? -EFAULT : 0; | ||
227 | if (ret) | ||
228 | goto error; | ||
229 | |||
230 | for (i = 0; i < dbg->arch.nr_hw_bp; i++) { | ||
231 | switch (bp_data[i].type) { | ||
232 | case KVM_HW_WP_WRITE: | ||
233 | nr_wp++; | ||
234 | break; | ||
235 | case KVM_HW_BP: | ||
236 | nr_bp++; | ||
237 | break; | ||
238 | default: | ||
239 | break; | ||
240 | } | ||
241 | } | ||
242 | |||
243 | size = nr_wp * sizeof(struct kvm_hw_wp_info_arch); | ||
244 | if (size > 0) { | ||
245 | wp_info = kmalloc(size, GFP_KERNEL); | ||
246 | if (!wp_info) { | ||
247 | ret = -ENOMEM; | ||
248 | goto error; | ||
249 | } | ||
250 | } | ||
251 | size = nr_bp * sizeof(struct kvm_hw_bp_info_arch); | ||
252 | if (size > 0) { | ||
253 | bp_info = kmalloc(size, GFP_KERNEL); | ||
254 | if (!bp_info) { | ||
255 | ret = -ENOMEM; | ||
256 | goto error; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) { | ||
261 | switch (bp_data[i].type) { | ||
262 | case KVM_HW_WP_WRITE: | ||
263 | ret = __import_wp_info(vcpu, &bp_data[i], | ||
264 | &wp_info[nr_wp]); | ||
265 | if (ret) | ||
266 | goto error; | ||
267 | nr_wp++; | ||
268 | break; | ||
269 | case KVM_HW_BP: | ||
270 | bp_info[nr_bp].len = bp_data[i].len; | ||
271 | bp_info[nr_bp].addr = bp_data[i].addr; | ||
272 | nr_bp++; | ||
273 | break; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | vcpu->arch.guestdbg.nr_hw_bp = nr_bp; | ||
278 | vcpu->arch.guestdbg.hw_bp_info = bp_info; | ||
279 | vcpu->arch.guestdbg.nr_hw_wp = nr_wp; | ||
280 | vcpu->arch.guestdbg.hw_wp_info = wp_info; | ||
281 | return 0; | ||
282 | error: | ||
283 | kfree(bp_data); | ||
284 | kfree(wp_info); | ||
285 | kfree(bp_info); | ||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu) | ||
290 | { | ||
291 | int i; | ||
292 | struct kvm_hw_wp_info_arch *hw_wp_info = NULL; | ||
293 | |||
294 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { | ||
295 | hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i]; | ||
296 | kfree(hw_wp_info->old_data); | ||
297 | hw_wp_info->old_data = NULL; | ||
298 | } | ||
299 | kfree(vcpu->arch.guestdbg.hw_wp_info); | ||
300 | vcpu->arch.guestdbg.hw_wp_info = NULL; | ||
301 | |||
302 | kfree(vcpu->arch.guestdbg.hw_bp_info); | ||
303 | vcpu->arch.guestdbg.hw_bp_info = NULL; | ||
304 | |||
305 | vcpu->arch.guestdbg.nr_hw_wp = 0; | ||
306 | vcpu->arch.guestdbg.nr_hw_bp = 0; | ||
307 | } | ||
308 | |||
309 | static inline int in_addr_range(u64 addr, u64 a, u64 b) | ||
310 | { | ||
311 | if (a <= b) | ||
312 | return (addr >= a) && (addr <= b); | ||
313 | else | ||
314 | /* "overflowing" interval */ | ||
315 | return (addr >= a) || (addr <= b); | ||
316 | } | ||
317 | |||
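[Editorial aside: the wrapping case can be exercised standalone. With a > b the range covers the top of the address space from a upwards plus the low addresses up to b; a quick harness, illustrative only:]

	#include <assert.h>

	typedef unsigned long long u64;

	static int in_addr_range(u64 addr, u64 a, u64 b)
	{
		if (a <= b)
			return (addr >= a) && (addr <= b);
		else
			/* "overflowing" interval: [a, MAX] plus [0, b] */
			return (addr >= a) || (addr <= b);
	}

	int main(void)
	{
		assert(in_addr_range(0x500, 0x100, 0x900));	/* normal range */
		assert(!in_addr_range(0x500, 0x900, 0x100));	/* gap of the wrap */
		assert(in_addr_range(0xa00, 0x900, 0x100));	/* upper wrapped part */
		assert(in_addr_range(0x050, 0x900, 0x100));	/* lower wrapped part */
		return 0;
	}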
318 | #define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1) | ||
319 | |||
320 | static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu, | ||
321 | unsigned long addr) | ||
322 | { | ||
323 | struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info; | ||
324 | int i; | ||
325 | |||
326 | if (vcpu->arch.guestdbg.nr_hw_bp == 0) | ||
327 | return NULL; | ||
328 | |||
329 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { | ||
330 | /* addr is directly the start or in the range of a bp */ | ||
331 | if (addr == bp_info->addr) | ||
332 | goto found; | ||
333 | if (bp_info->len > 0 && | ||
334 | in_addr_range(addr, bp_info->addr, end_of_range(bp_info))) | ||
335 | goto found; | ||
336 | |||
337 | bp_info++; | ||
338 | } | ||
339 | |||
340 | return NULL; | ||
341 | found: | ||
342 | return bp_info; | ||
343 | } | ||
344 | |||
345 | static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu) | ||
346 | { | ||
347 | int i; | ||
348 | struct kvm_hw_wp_info_arch *wp_info = NULL; | ||
349 | void *temp = NULL; | ||
350 | |||
351 | if (vcpu->arch.guestdbg.nr_hw_wp == 0) | ||
352 | return NULL; | ||
353 | |||
354 | for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { | ||
355 | wp_info = &vcpu->arch.guestdbg.hw_wp_info[i]; | ||
356 | if (!wp_info || !wp_info->old_data || wp_info->len <= 0) | ||
357 | continue; | ||
358 | |||
359 | temp = kmalloc(wp_info->len, GFP_KERNEL); | ||
360 | if (!temp) | ||
361 | continue; | ||
362 | |||
363 | /* refetch the wp data and compare it to the old value */ | ||
364 | if (!read_guest(vcpu, wp_info->phys_addr, temp, | ||
365 | wp_info->len)) { | ||
366 | if (memcmp(temp, wp_info->old_data, wp_info->len)) { | ||
367 | kfree(temp); | ||
368 | return wp_info; | ||
369 | } | ||
370 | } | ||
371 | kfree(temp); | ||
372 | temp = NULL; | ||
373 | } | ||
374 | |||
375 | return NULL; | ||
376 | } | ||
377 | |||
378 | void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu) | ||
379 | { | ||
380 | vcpu->run->exit_reason = KVM_EXIT_DEBUG; | ||
381 | vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; | ||
382 | } | ||
383 | |||
384 | #define per_bp_event(code) \ | ||
385 | (code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH)) | ||
386 | #define per_write_wp_event(code) \ | ||
387 | (code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL)) | ||
388 | |||
389 | static int debug_exit_required(struct kvm_vcpu *vcpu) | ||
390 | { | ||
391 | u32 perc = (vcpu->arch.sie_block->perc << 24); | ||
392 | struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch; | ||
393 | struct kvm_hw_wp_info_arch *wp_info = NULL; | ||
394 | struct kvm_hw_bp_info_arch *bp_info = NULL; | ||
395 | unsigned long addr = vcpu->arch.sie_block->gpsw.addr; | ||
396 | unsigned long peraddr = vcpu->arch.sie_block->peraddr; | ||
397 | |||
398 | if (guestdbg_hw_bp_enabled(vcpu)) { | ||
399 | if (per_write_wp_event(perc) && | ||
400 | vcpu->arch.guestdbg.nr_hw_wp > 0) { | ||
401 | wp_info = any_wp_changed(vcpu); | ||
402 | if (wp_info) { | ||
403 | debug_exit->addr = wp_info->addr; | ||
404 | debug_exit->type = KVM_HW_WP_WRITE; | ||
405 | goto exit_required; | ||
406 | } | ||
407 | } | ||
408 | if (per_bp_event(perc) && | ||
409 | vcpu->arch.guestdbg.nr_hw_bp > 0) { | ||
410 | bp_info = find_hw_bp(vcpu, addr); | ||
411 | /* remove duplicate events if PC==PER address */ | ||
412 | if (bp_info && (addr != peraddr)) { | ||
413 | debug_exit->addr = addr; | ||
414 | debug_exit->type = KVM_HW_BP; | ||
415 | vcpu->arch.guestdbg.last_bp = addr; | ||
416 | goto exit_required; | ||
417 | } | ||
418 | /* breakpoint missed */ | ||
419 | bp_info = find_hw_bp(vcpu, peraddr); | ||
420 | if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) { | ||
421 | debug_exit->addr = peraddr; | ||
422 | debug_exit->type = KVM_HW_BP; | ||
423 | goto exit_required; | ||
424 | } | ||
425 | } | ||
426 | } | ||
427 | if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) { | ||
428 | debug_exit->addr = addr; | ||
429 | debug_exit->type = KVM_SINGLESTEP; | ||
430 | goto exit_required; | ||
431 | } | ||
432 | |||
433 | return 0; | ||
434 | exit_required: | ||
435 | return 1; | ||
436 | } | ||
437 | |||
438 | #define guest_per_enabled(vcpu) \ | ||
439 | (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) | ||
440 | |||
441 | static void filter_guest_per_event(struct kvm_vcpu *vcpu) | ||
442 | { | ||
443 | u32 perc = vcpu->arch.sie_block->perc << 24; | ||
444 | u64 peraddr = vcpu->arch.sie_block->peraddr; | ||
445 | u64 addr = vcpu->arch.sie_block->gpsw.addr; | ||
446 | u64 cr9 = vcpu->arch.sie_block->gcr[9]; | ||
447 | u64 cr10 = vcpu->arch.sie_block->gcr[10]; | ||
448 | u64 cr11 = vcpu->arch.sie_block->gcr[11]; | ||
449 | /* filter all events demanded by the guest */ | ||
450 | u32 guest_perc = perc & cr9 & PER_EVENT_MASK; | ||
451 | |||
452 | if (!guest_per_enabled(vcpu)) | ||
453 | guest_perc = 0; | ||
454 | |||
455 | /* filter "successful-branching" events */ | ||
456 | if (guest_perc & PER_EVENT_BRANCH && | ||
457 | cr9 & PER_CONTROL_BRANCH_ADDRESS && | ||
458 | !in_addr_range(addr, cr10, cr11)) | ||
459 | guest_perc &= ~PER_EVENT_BRANCH; | ||
460 | |||
461 | /* filter "instruction-fetching" events */ | ||
462 | if (guest_perc & PER_EVENT_IFETCH && | ||
463 | !in_addr_range(peraddr, cr10, cr11)) | ||
464 | guest_perc &= ~PER_EVENT_IFETCH; | ||
465 | |||
466 | /* All other PER events will be given to the guest */ | ||
467 | /* TODO: Check altered address/address space */ | ||
468 | |||
469 | vcpu->arch.sie_block->perc = guest_perc >> 24; | ||
470 | |||
471 | if (!guest_perc) | ||
472 | vcpu->arch.sie_block->iprcc &= ~PGM_PER; | ||
473 | } | ||
474 | |||
475 | void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu) | ||
476 | { | ||
477 | if (debug_exit_required(vcpu)) | ||
478 | vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING; | ||
479 | |||
480 | filter_guest_per_event(vcpu); | ||
481 | } | ||
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index eeb1ac7d8fa4..30e1c5eb726a 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/pagemap.h> | 16 | #include <linux/pagemap.h> |
17 | 17 | ||
18 | #include <asm/kvm_host.h> | 18 | #include <asm/kvm_host.h> |
19 | #include <asm/asm-offsets.h> | ||
19 | 20 | ||
20 | #include "kvm-s390.h" | 21 | #include "kvm-s390.h" |
21 | #include "gaccess.h" | 22 | #include "gaccess.h" |
@@ -29,6 +30,7 @@ static const intercept_handler_t instruction_handlers[256] = { | |||
29 | [0x83] = kvm_s390_handle_diag, | 30 | [0x83] = kvm_s390_handle_diag, |
30 | [0xae] = kvm_s390_handle_sigp, | 31 | [0xae] = kvm_s390_handle_sigp, |
31 | [0xb2] = kvm_s390_handle_b2, | 32 | [0xb2] = kvm_s390_handle_b2, |
33 | [0xb6] = kvm_s390_handle_stctl, | ||
32 | [0xb7] = kvm_s390_handle_lctl, | 34 | [0xb7] = kvm_s390_handle_lctl, |
33 | [0xb9] = kvm_s390_handle_b9, | 35 | [0xb9] = kvm_s390_handle_b9, |
34 | [0xe5] = kvm_s390_handle_e5, | 36 | [0xe5] = kvm_s390_handle_e5, |
@@ -109,22 +111,112 @@ static int handle_instruction(struct kvm_vcpu *vcpu) | |||
109 | return -EOPNOTSUPP; | 111 | return -EOPNOTSUPP; |
110 | } | 112 | } |
111 | 113 | ||
114 | static void __extract_prog_irq(struct kvm_vcpu *vcpu, | ||
115 | struct kvm_s390_pgm_info *pgm_info) | ||
116 | { | ||
117 | memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info)); | ||
118 | pgm_info->code = vcpu->arch.sie_block->iprcc; | ||
119 | |||
120 | switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) { | ||
121 | case PGM_AFX_TRANSLATION: | ||
122 | case PGM_ASX_TRANSLATION: | ||
123 | case PGM_EX_TRANSLATION: | ||
124 | case PGM_LFX_TRANSLATION: | ||
125 | case PGM_LSTE_SEQUENCE: | ||
126 | case PGM_LSX_TRANSLATION: | ||
127 | case PGM_LX_TRANSLATION: | ||
128 | case PGM_PRIMARY_AUTHORITY: | ||
129 | case PGM_SECONDARY_AUTHORITY: | ||
130 | case PGM_SPACE_SWITCH: | ||
131 | pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; | ||
132 | break; | ||
133 | case PGM_ALEN_TRANSLATION: | ||
134 | case PGM_ALE_SEQUENCE: | ||
135 | case PGM_ASTE_INSTANCE: | ||
136 | case PGM_ASTE_SEQUENCE: | ||
137 | case PGM_ASTE_VALIDITY: | ||
138 | case PGM_EXTENDED_AUTHORITY: | ||
139 | pgm_info->exc_access_id = vcpu->arch.sie_block->eai; | ||
140 | break; | ||
141 | case PGM_ASCE_TYPE: | ||
142 | case PGM_PAGE_TRANSLATION: | ||
143 | case PGM_REGION_FIRST_TRANS: | ||
144 | case PGM_REGION_SECOND_TRANS: | ||
145 | case PGM_REGION_THIRD_TRANS: | ||
146 | case PGM_SEGMENT_TRANSLATION: | ||
147 | pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; | ||
148 | pgm_info->exc_access_id = vcpu->arch.sie_block->eai; | ||
149 | pgm_info->op_access_id = vcpu->arch.sie_block->oai; | ||
150 | break; | ||
151 | case PGM_MONITOR: | ||
152 | pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn; | ||
153 | pgm_info->mon_code = vcpu->arch.sie_block->tecmc; | ||
154 | break; | ||
155 | case PGM_DATA: | ||
156 | pgm_info->data_exc_code = vcpu->arch.sie_block->dxc; | ||
157 | break; | ||
158 | case PGM_PROTECTION: | ||
159 | pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; | ||
160 | pgm_info->exc_access_id = vcpu->arch.sie_block->eai; | ||
161 | break; | ||
162 | default: | ||
163 | break; | ||
164 | } | ||
165 | |||
166 | if (vcpu->arch.sie_block->iprcc & PGM_PER) { | ||
167 | pgm_info->per_code = vcpu->arch.sie_block->perc; | ||
168 | pgm_info->per_atmid = vcpu->arch.sie_block->peratmid; | ||
169 | pgm_info->per_address = vcpu->arch.sie_block->peraddr; | ||
170 | pgm_info->per_access_id = vcpu->arch.sie_block->peraid; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * restore ITDB to program-interruption TDB in guest lowcore | ||
176 | * and set TX abort indication if required | ||
177 | */ | ||
178 | static int handle_itdb(struct kvm_vcpu *vcpu) | ||
179 | { | ||
180 | struct kvm_s390_itdb *itdb; | ||
181 | int rc; | ||
182 | |||
183 | if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu)) | ||
184 | return 0; | ||
185 | if (current->thread.per_flags & PER_FLAG_NO_TE) | ||
186 | return 0; | ||
187 | itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba; | ||
188 | rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb)); | ||
189 | if (rc) | ||
190 | return rc; | ||
191 | memset(itdb, 0, sizeof(*itdb)); | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
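
One detail worth calling out in handle_itdb() above: __LC_PGM_TDB comes from the newly included asm/asm-offsets.h and names the program-interruption TDB slot in the guest lowcore; as far as I can tell it is the same 0x1800 offset as the TDB_ADDR literal this series removes from kvm-s390.h, which a compile-time check could pin down:

    /* sketch, assuming the asm-offsets constant matches the old literal;
     * would live in any function-scope init path */
    BUILD_BUG_ON(__LC_PGM_TDB != 0x1800UL);
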
196 | #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER) | ||
197 | |||
112 | static int handle_prog(struct kvm_vcpu *vcpu) | 198 | static int handle_prog(struct kvm_vcpu *vcpu) |
113 | { | 199 | { |
200 | struct kvm_s390_pgm_info pgm_info; | ||
201 | int rc; | ||
202 | |||
114 | vcpu->stat.exit_program_interruption++; | 203 | vcpu->stat.exit_program_interruption++; |
115 | 204 | ||
116 | /* Restore ITDB to Program-Interruption TDB in guest memory */ | 205 | if (guestdbg_enabled(vcpu) && per_event(vcpu)) { |
117 | if (IS_TE_ENABLED(vcpu) && | 206 | kvm_s390_handle_per_event(vcpu); |
118 | !(current->thread.per_flags & PER_FLAG_NO_TE) && | 207 | /* the interrupt might have been filtered out completely */ |
119 | IS_ITDB_VALID(vcpu)) { | 208 | if (vcpu->arch.sie_block->iprcc == 0) |
120 | copy_to_guest(vcpu, TDB_ADDR, vcpu->arch.sie_block->itdba, | 209 | return 0; |
121 | sizeof(struct kvm_s390_itdb)); | ||
122 | memset((void *) vcpu->arch.sie_block->itdba, 0, | ||
123 | sizeof(struct kvm_s390_itdb)); | ||
124 | } | 210 | } |
125 | 211 | ||
126 | trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); | 212 | trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); |
127 | return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc); | 213 | |
214 | rc = handle_itdb(vcpu); | ||
215 | if (rc) | ||
216 | return rc; | ||
217 | |||
218 | __extract_prog_irq(vcpu, &pgm_info); | ||
219 | return kvm_s390_inject_prog_irq(vcpu, &pgm_info); | ||
128 | } | 220 | } |
129 | 221 | ||
130 | static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) | 222 | static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 200a8f9390b6..077e4738ebdc 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -56,6 +56,17 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) | |||
56 | return 1; | 56 | return 1; |
57 | } | 57 | } |
58 | 58 | ||
59 | static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) | ||
60 | { | ||
61 | if (psw_extint_disabled(vcpu) || | ||
62 | !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) | ||
63 | return 0; | ||
64 | if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) | ||
65 | /* No timer interrupts when single stepping */ | ||
66 | return 0; | ||
67 | return 1; | ||
68 | } | ||
69 | |||
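
For readers of the 0x800ul test above: that bit is the clock-comparator subclass mask in control register 0 (bit 52), so ckc_interrupts_enabled() reads as "external interrupts open in the PSW, ckc subclass enabled in CR0, and not single-stepping". A hypothetical named constant makes the magic value explicit:

    /* assumed name; 1UL << (63 - 52), the CR0 clock-comparator submask */
    #define CR0_CLOCK_COMPARATOR_SUBMASK 0x0000000000000800UL
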
59 | static u64 int_word_to_isc_bits(u32 int_word) | 70 | static u64 int_word_to_isc_bits(u32 int_word) |
60 | { | 71 | { |
61 | u8 isc = (int_word & 0x38000000) >> 27; | 72 | u8 isc = (int_word & 0x38000000) >> 27; |
@@ -131,7 +142,13 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) | |||
131 | CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, | 142 | CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, |
132 | &vcpu->arch.sie_block->cpuflags); | 143 | &vcpu->arch.sie_block->cpuflags); |
133 | vcpu->arch.sie_block->lctl = 0x0000; | 144 | vcpu->arch.sie_block->lctl = 0x0000; |
134 | vcpu->arch.sie_block->ictl &= ~ICTL_LPSW; | 145 | vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); |
146 | |||
147 | if (guestdbg_enabled(vcpu)) { | ||
148 | vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | | ||
149 | LCTL_CR10 | LCTL_CR11); | ||
150 | vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); | ||
151 | } | ||
135 | } | 152 | } |
136 | 153 | ||
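
The control registers picked above follow from the PER layout; as an annotation (my reading of the architecture, not patch text):

    /* assumed register roles behind the guestdbg lctl/ictl setup:
     *   CR9  - PER event masks
     *   CR10 - PER starting address
     *   CR11 - PER ending address
     * trapping their loads (LCTL_CR*) and stores (ICTL_STCTL) lets the host
     * shadow the guest's values, and ICTL_PINT intercepts every guest program
     * interruption so PER events can be filtered before delivery */
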
137 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) | 154 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) |
@@ -174,6 +191,106 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu, | |||
174 | } | 191 | } |
175 | } | 192 | } |
176 | 193 | ||
194 | static int __deliver_prog_irq(struct kvm_vcpu *vcpu, | ||
195 | struct kvm_s390_pgm_info *pgm_info) | ||
196 | { | ||
197 | const unsigned short table[] = { 2, 4, 4, 6 }; | ||
198 | int rc = 0; | ||
199 | |||
200 | switch (pgm_info->code & ~PGM_PER) { | ||
201 | case PGM_AFX_TRANSLATION: | ||
202 | case PGM_ASX_TRANSLATION: | ||
203 | case PGM_EX_TRANSLATION: | ||
204 | case PGM_LFX_TRANSLATION: | ||
205 | case PGM_LSTE_SEQUENCE: | ||
206 | case PGM_LSX_TRANSLATION: | ||
207 | case PGM_LX_TRANSLATION: | ||
208 | case PGM_PRIMARY_AUTHORITY: | ||
209 | case PGM_SECONDARY_AUTHORITY: | ||
210 | case PGM_SPACE_SWITCH: | ||
211 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | ||
212 | (u64 *)__LC_TRANS_EXC_CODE); | ||
213 | break; | ||
214 | case PGM_ALEN_TRANSLATION: | ||
215 | case PGM_ALE_SEQUENCE: | ||
216 | case PGM_ASTE_INSTANCE: | ||
217 | case PGM_ASTE_SEQUENCE: | ||
218 | case PGM_ASTE_VALIDITY: | ||
219 | case PGM_EXTENDED_AUTHORITY: | ||
220 | rc = put_guest_lc(vcpu, pgm_info->exc_access_id, | ||
221 | (u8 *)__LC_EXC_ACCESS_ID); | ||
222 | break; | ||
223 | case PGM_ASCE_TYPE: | ||
224 | case PGM_PAGE_TRANSLATION: | ||
225 | case PGM_REGION_FIRST_TRANS: | ||
226 | case PGM_REGION_SECOND_TRANS: | ||
227 | case PGM_REGION_THIRD_TRANS: | ||
228 | case PGM_SEGMENT_TRANSLATION: | ||
229 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | ||
230 | (u64 *)__LC_TRANS_EXC_CODE); | ||
231 | rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, | ||
232 | (u8 *)__LC_EXC_ACCESS_ID); | ||
233 | rc |= put_guest_lc(vcpu, pgm_info->op_access_id, | ||
234 | (u8 *)__LC_OP_ACCESS_ID); | ||
235 | break; | ||
236 | case PGM_MONITOR: | ||
237 | rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, | ||
238 | (u64 *)__LC_MON_CLASS_NR); | ||
239 | rc |= put_guest_lc(vcpu, pgm_info->mon_code, | ||
240 | (u64 *)__LC_MON_CODE); | ||
241 | break; | ||
242 | case PGM_DATA: | ||
243 | rc = put_guest_lc(vcpu, pgm_info->data_exc_code, | ||
244 | (u32 *)__LC_DATA_EXC_CODE); | ||
245 | break; | ||
246 | case PGM_PROTECTION: | ||
247 | rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, | ||
248 | (u64 *)__LC_TRANS_EXC_CODE); | ||
249 | rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, | ||
250 | (u8 *)__LC_EXC_ACCESS_ID); | ||
251 | break; | ||
252 | } | ||
253 | |||
254 | if (pgm_info->code & PGM_PER) { | ||
255 | rc |= put_guest_lc(vcpu, pgm_info->per_code, | ||
256 | (u8 *) __LC_PER_CODE); | ||
257 | rc |= put_guest_lc(vcpu, pgm_info->per_atmid, | ||
258 | (u8 *)__LC_PER_ATMID); | ||
259 | rc |= put_guest_lc(vcpu, pgm_info->per_address, | ||
260 | (u64 *) __LC_PER_ADDRESS); | ||
261 | rc |= put_guest_lc(vcpu, pgm_info->per_access_id, | ||
262 | (u8 *) __LC_PER_ACCESS_ID); | ||
263 | } | ||
264 | |||
265 | switch (vcpu->arch.sie_block->icptcode) { | ||
266 | case ICPT_INST: | ||
267 | case ICPT_INSTPROGI: | ||
268 | case ICPT_OPEREXC: | ||
269 | case ICPT_PARTEXEC: | ||
270 | case ICPT_IOINST: | ||
271 | /* last instruction only stored for these icptcodes */ | ||
272 | rc |= put_guest_lc(vcpu, table[vcpu->arch.sie_block->ipa >> 14], | ||
273 | (u16 *) __LC_PGM_ILC); | ||
274 | break; | ||
275 | case ICPT_PROGI: | ||
276 | rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->pgmilc, | ||
277 | (u16 *) __LC_PGM_ILC); | ||
278 | break; | ||
279 | default: | ||
280 | rc |= put_guest_lc(vcpu, 0, | ||
281 | (u16 *) __LC_PGM_ILC); | ||
282 | } | ||
283 | |||
284 | rc |= put_guest_lc(vcpu, pgm_info->code, | ||
285 | (u16 *)__LC_PGM_INT_CODE); | ||
286 | rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, | ||
287 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
288 | rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, | ||
289 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
290 | |||
291 | return rc; | ||
292 | } | ||
293 | |||
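
On the table[] = { 2, 4, 4, 6 } lookup in __deliver_prog_irq(): the index ipa >> 14 is the two high-order bits of the instruction's first halfword, which on s390 encode the instruction length code (00 gives 2 bytes, 01 and 10 give 4, 11 gives 6). The same mapping can be computed rather than table-driven; an equivalent sketch, not part of the patch:

    /* 0 -> 2, 1 -> 4, 2 -> 4, 3 -> 6, matching table[ipa >> 14] */
    static inline u16 ilc_from_ipa(u16 ipa)
    {
            return ((ipa >> 14) + 3) & ~1U;
    }
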
177 | static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | 294 | static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, |
178 | struct kvm_s390_interrupt_info *inti) | 295 | struct kvm_s390_interrupt_info *inti) |
179 | { | 296 | { |
@@ -186,26 +303,28 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
186 | vcpu->stat.deliver_emergency_signal++; | 303 | vcpu->stat.deliver_emergency_signal++; |
187 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 304 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
188 | inti->emerg.code, 0); | 305 | inti->emerg.code, 0); |
189 | rc = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE); | 306 | rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE); |
190 | rc |= put_guest(vcpu, inti->emerg.code, | 307 | rc |= put_guest_lc(vcpu, inti->emerg.code, |
191 | (u16 __user *)__LC_EXT_CPU_ADDR); | 308 | (u16 *)__LC_EXT_CPU_ADDR); |
192 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 309 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
310 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
311 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
193 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 312 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
194 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
195 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | ||
196 | break; | 313 | break; |
197 | case KVM_S390_INT_EXTERNAL_CALL: | 314 | case KVM_S390_INT_EXTERNAL_CALL: |
198 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); | 315 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); |
199 | vcpu->stat.deliver_external_call++; | 316 | vcpu->stat.deliver_external_call++; |
200 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 317 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
201 | inti->extcall.code, 0); | 318 | inti->extcall.code, 0); |
202 | rc = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE); | 319 | rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE); |
203 | rc |= put_guest(vcpu, inti->extcall.code, | 320 | rc |= put_guest_lc(vcpu, inti->extcall.code, |
204 | (u16 __user *)__LC_EXT_CPU_ADDR); | 321 | (u16 *)__LC_EXT_CPU_ADDR); |
205 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 322 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
206 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 323 | &vcpu->arch.sie_block->gpsw, |
207 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 324 | sizeof(psw_t)); |
208 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 325 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
326 | &vcpu->arch.sie_block->gpsw, | ||
327 | sizeof(psw_t)); | ||
209 | break; | 328 | break; |
210 | case KVM_S390_INT_SERVICE: | 329 | case KVM_S390_INT_SERVICE: |
211 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", | 330 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", |
@@ -213,37 +332,39 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
213 | vcpu->stat.deliver_service_signal++; | 332 | vcpu->stat.deliver_service_signal++; |
214 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 333 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
215 | inti->ext.ext_params, 0); | 334 | inti->ext.ext_params, 0); |
216 | rc = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE); | 335 | rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE); |
217 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 336 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
337 | &vcpu->arch.sie_block->gpsw, | ||
338 | sizeof(psw_t)); | ||
339 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
218 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 340 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
219 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 341 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, |
220 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 342 | (u32 *)__LC_EXT_PARAMS); |
221 | rc |= put_guest(vcpu, inti->ext.ext_params, | ||
222 | (u32 __user *)__LC_EXT_PARAMS); | ||
223 | break; | 343 | break; |
224 | case KVM_S390_INT_PFAULT_INIT: | 344 | case KVM_S390_INT_PFAULT_INIT: |
225 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, | 345 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, |
226 | inti->ext.ext_params2); | 346 | inti->ext.ext_params2); |
227 | rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE); | 347 | rc = put_guest_lc(vcpu, 0x2603, (u16 *) __LC_EXT_INT_CODE); |
228 | rc |= put_guest(vcpu, 0x0600, (u16 __user *) __LC_EXT_CPU_ADDR); | 348 | rc |= put_guest_lc(vcpu, 0x0600, (u16 *) __LC_EXT_CPU_ADDR); |
229 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 349 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
350 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
351 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
230 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 352 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
231 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 353 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, |
232 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 354 | (u64 *) __LC_EXT_PARAMS2); |
233 | rc |= put_guest(vcpu, inti->ext.ext_params2, | ||
234 | (u64 __user *) __LC_EXT_PARAMS2); | ||
235 | break; | 355 | break; |
236 | case KVM_S390_INT_PFAULT_DONE: | 356 | case KVM_S390_INT_PFAULT_DONE: |
237 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, | 357 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, |
238 | inti->ext.ext_params2); | 358 | inti->ext.ext_params2); |
239 | rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE); | 359 | rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); |
240 | rc |= put_guest(vcpu, 0x0680, (u16 __user *) __LC_EXT_CPU_ADDR); | 360 | rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR); |
241 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 361 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
362 | &vcpu->arch.sie_block->gpsw, | ||
363 | sizeof(psw_t)); | ||
364 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
242 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 365 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
243 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 366 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, |
244 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 367 | (u64 *)__LC_EXT_PARAMS2); |
245 | rc |= put_guest(vcpu, inti->ext.ext_params2, | ||
246 | (u64 __user *) __LC_EXT_PARAMS2); | ||
247 | break; | 368 | break; |
248 | case KVM_S390_INT_VIRTIO: | 369 | case KVM_S390_INT_VIRTIO: |
249 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", | 370 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", |
@@ -252,16 +373,17 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
252 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 373 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
253 | inti->ext.ext_params, | 374 | inti->ext.ext_params, |
254 | inti->ext.ext_params2); | 375 | inti->ext.ext_params2); |
255 | rc = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE); | 376 | rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); |
256 | rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR); | 377 | rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR); |
257 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 378 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
379 | &vcpu->arch.sie_block->gpsw, | ||
380 | sizeof(psw_t)); | ||
381 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
258 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 382 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
259 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 383 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, |
260 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | 384 | (u32 *)__LC_EXT_PARAMS); |
261 | rc |= put_guest(vcpu, inti->ext.ext_params, | 385 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, |
262 | (u32 __user *)__LC_EXT_PARAMS); | 386 | (u64 *)__LC_EXT_PARAMS2); |
263 | rc |= put_guest(vcpu, inti->ext.ext_params2, | ||
264 | (u64 __user *)__LC_EXT_PARAMS2); | ||
265 | break; | 387 | break; |
266 | case KVM_S390_SIGP_STOP: | 388 | case KVM_S390_SIGP_STOP: |
267 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); | 389 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); |
@@ -285,12 +407,12 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
285 | vcpu->stat.deliver_restart_signal++; | 407 | vcpu->stat.deliver_restart_signal++; |
286 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 408 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
287 | 0, 0); | 409 | 0, 0); |
288 | rc = copy_to_guest(vcpu, | 410 | rc = write_guest_lc(vcpu, |
289 | offsetof(struct _lowcore, restart_old_psw), | 411 | offsetof(struct _lowcore, restart_old_psw), |
290 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 412 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
291 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 413 | rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), |
292 | offsetof(struct _lowcore, restart_psw), | 414 | &vcpu->arch.sie_block->gpsw, |
293 | sizeof(psw_t)); | 415 | sizeof(psw_t)); |
294 | atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | 416 | atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); |
295 | break; | 417 | break; |
296 | case KVM_S390_PROGRAM_INT: | 418 | case KVM_S390_PROGRAM_INT: |
@@ -300,13 +422,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
300 | vcpu->stat.deliver_program_int++; | 422 | vcpu->stat.deliver_program_int++; |
301 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 423 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
302 | inti->pgm.code, 0); | 424 | inti->pgm.code, 0); |
303 | rc = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE); | 425 | rc = __deliver_prog_irq(vcpu, &inti->pgm); |
304 | rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14], | ||
305 | (u16 __user *)__LC_PGM_ILC); | ||
306 | rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW, | ||
307 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
308 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
309 | __LC_PGM_NEW_PSW, sizeof(psw_t)); | ||
310 | break; | 426 | break; |
311 | 427 | ||
312 | case KVM_S390_MCHK: | 428 | case KVM_S390_MCHK: |
@@ -317,11 +433,12 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
317 | inti->mchk.mcic); | 433 | inti->mchk.mcic); |
318 | rc = kvm_s390_vcpu_store_status(vcpu, | 434 | rc = kvm_s390_vcpu_store_status(vcpu, |
319 | KVM_S390_STORE_STATUS_PREFIXED); | 435 | KVM_S390_STORE_STATUS_PREFIXED); |
320 | rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE); | 436 | rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE); |
321 | rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW, | 437 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, |
438 | &vcpu->arch.sie_block->gpsw, | ||
439 | sizeof(psw_t)); | ||
440 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | ||
322 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 441 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
323 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
324 | __LC_MCK_NEW_PSW, sizeof(psw_t)); | ||
325 | break; | 442 | break; |
326 | 443 | ||
327 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 444 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
@@ -334,18 +451,20 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
334 | vcpu->stat.deliver_io_int++; | 451 | vcpu->stat.deliver_io_int++; |
335 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 452 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, |
336 | param0, param1); | 453 | param0, param1); |
337 | rc = put_guest(vcpu, inti->io.subchannel_id, | 454 | rc = put_guest_lc(vcpu, inti->io.subchannel_id, |
338 | (u16 __user *) __LC_SUBCHANNEL_ID); | 455 | (u16 *)__LC_SUBCHANNEL_ID); |
339 | rc |= put_guest(vcpu, inti->io.subchannel_nr, | 456 | rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, |
340 | (u16 __user *) __LC_SUBCHANNEL_NR); | 457 | (u16 *)__LC_SUBCHANNEL_NR); |
341 | rc |= put_guest(vcpu, inti->io.io_int_parm, | 458 | rc |= put_guest_lc(vcpu, inti->io.io_int_parm, |
342 | (u32 __user *) __LC_IO_INT_PARM); | 459 | (u32 *)__LC_IO_INT_PARM); |
343 | rc |= put_guest(vcpu, inti->io.io_int_word, | 460 | rc |= put_guest_lc(vcpu, inti->io.io_int_word, |
344 | (u32 __user *) __LC_IO_INT_WORD); | 461 | (u32 *)__LC_IO_INT_WORD); |
345 | rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW, | 462 | rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, |
346 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 463 | &vcpu->arch.sie_block->gpsw, |
347 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | 464 | sizeof(psw_t)); |
348 | __LC_IO_NEW_PSW, sizeof(psw_t)); | 465 | rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, |
466 | &vcpu->arch.sie_block->gpsw, | ||
467 | sizeof(psw_t)); | ||
349 | break; | 468 | break; |
350 | } | 469 | } |
351 | default: | 470 | default: |
@@ -358,25 +477,21 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
358 | } | 477 | } |
359 | } | 478 | } |
360 | 479 | ||
361 | static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu) | 480 | static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu) |
362 | { | 481 | { |
363 | int rc; | 482 | int rc; |
364 | 483 | ||
365 | if (psw_extint_disabled(vcpu)) | 484 | rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); |
366 | return 0; | 485 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
367 | if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) | 486 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
368 | return 0; | 487 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
369 | rc = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); | 488 | &vcpu->arch.sie_block->gpsw, |
370 | rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | 489 | sizeof(psw_t)); |
371 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
372 | rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
373 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | ||
374 | if (rc) { | 490 | if (rc) { |
375 | printk("kvm: The guest lowcore is not mapped during interrupt " | 491 | printk("kvm: The guest lowcore is not mapped during interrupt " |
376 | "delivery, killing userspace\n"); | 492 | "delivery, killing userspace\n"); |
377 | do_exit(SIGKILL); | 493 | do_exit(SIGKILL); |
378 | } | 494 | } |
379 | return 1; | ||
380 | } | 495 | } |
381 | 496 | ||
382 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | 497 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) |
@@ -406,19 +521,20 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | |||
406 | spin_unlock(&fi->lock); | 521 | spin_unlock(&fi->lock); |
407 | } | 522 | } |
408 | 523 | ||
409 | if ((!rc) && (vcpu->arch.sie_block->ckc < | 524 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) |
410 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) { | 525 | rc = 1; |
411 | if ((!psw_extint_disabled(vcpu)) && | ||
412 | (vcpu->arch.sie_block->gcr[0] & 0x800ul)) | ||
413 | rc = 1; | ||
414 | } | ||
415 | 526 | ||
416 | return rc; | 527 | return rc; |
417 | } | 528 | } |
418 | 529 | ||
419 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 530 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
420 | { | 531 | { |
421 | return 0; | 532 | if (!(vcpu->arch.sie_block->ckc < |
533 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) | ||
534 | return 0; | ||
535 | if (!ckc_interrupts_enabled(vcpu)) | ||
536 | return 0; | ||
537 | return 1; | ||
422 | } | 538 | } |
423 | 539 | ||
424 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | 540 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) |
@@ -441,8 +557,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | |||
441 | return -EOPNOTSUPP; /* disabled wait */ | 557 | return -EOPNOTSUPP; /* disabled wait */ |
442 | } | 558 | } |
443 | 559 | ||
444 | if (psw_extint_disabled(vcpu) || | 560 | if (!ckc_interrupts_enabled(vcpu)) { |
445 | (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) { | ||
446 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); | 561 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); |
447 | goto no_timer; | 562 | goto no_timer; |
448 | } | 563 | } |
@@ -554,9 +669,8 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
554 | } while (deliver); | 669 | } while (deliver); |
555 | } | 670 | } |
556 | 671 | ||
557 | if ((vcpu->arch.sie_block->ckc < | 672 | if (kvm_cpu_has_pending_timer(vcpu)) |
558 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) | 673 | deliver_ckc_interrupt(vcpu); |
559 | __try_deliver_ckc_interrupt(vcpu); | ||
560 | 674 | ||
561 | if (atomic_read(&fi->active)) { | 675 | if (atomic_read(&fi->active)) { |
562 | do { | 676 | do { |
@@ -660,6 +774,31 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) | |||
660 | return 0; | 774 | return 0; |
661 | } | 775 | } |
662 | 776 | ||
777 | int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, | ||
778 | struct kvm_s390_pgm_info *pgm_info) | ||
779 | { | ||
780 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
781 | struct kvm_s390_interrupt_info *inti; | ||
782 | |||
783 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
784 | if (!inti) | ||
785 | return -ENOMEM; | ||
786 | |||
787 | VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", | ||
788 | pgm_info->code); | ||
789 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, | ||
790 | pgm_info->code, 0, 1); | ||
791 | |||
792 | inti->type = KVM_S390_PROGRAM_INT; | ||
793 | memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); | ||
794 | spin_lock_bh(&li->lock); | ||
795 | list_add(&inti->list, &li->list); | ||
796 | atomic_set(&li->active, 1); | ||
797 | BUG_ON(waitqueue_active(li->wq)); | ||
798 | spin_unlock_bh(&li->lock); | ||
799 | return 0; | ||
800 | } | ||
801 | |||
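
kvm_s390_inject_prog_irq() complements rather than replaces kvm_s390_inject_program_int(): the old helper stays for callers that only have a bare interruption code, while the new one carries the full payload extracted at interception time. A hypothetical caller, with placeholder values:

    struct kvm_s390_pgm_info pgm_info = {
            .code           = PGM_PROTECTION,
            .trans_exc_code = tec,          /* placeholder value */
            .exc_access_id  = access_id,    /* placeholder value */
    };
    rc = kvm_s390_inject_prog_irq(vcpu, &pgm_info);
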
663 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 802 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
664 | u64 cr6, u64 schid) | 803 | u64 cr6, u64 schid) |
665 | { | 804 | { |
@@ -810,6 +949,12 @@ int kvm_s390_inject_vm(struct kvm *kvm, | |||
810 | return __inject_vm(kvm, inti); | 949 | return __inject_vm(kvm, inti); |
811 | } | 950 | } |
812 | 951 | ||
952 | void kvm_s390_reinject_io_int(struct kvm *kvm, | ||
953 | struct kvm_s390_interrupt_info *inti) | ||
954 | { | ||
955 | __inject_vm(kvm, inti); | ||
956 | } | ||
957 | |||
813 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | 958 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, |
814 | struct kvm_s390_interrupt *s390int) | 959 | struct kvm_s390_interrupt *s390int) |
815 | { | 960 | { |
@@ -900,7 +1045,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
900 | return 0; | 1045 | return 0; |
901 | } | 1046 | } |
902 | 1047 | ||
903 | static void clear_floating_interrupts(struct kvm *kvm) | 1048 | void kvm_s390_clear_float_irqs(struct kvm *kvm) |
904 | { | 1049 | { |
905 | struct kvm_s390_float_interrupt *fi; | 1050 | struct kvm_s390_float_interrupt *fi; |
906 | struct kvm_s390_interrupt_info *n, *inti = NULL; | 1051 | struct kvm_s390_interrupt_info *n, *inti = NULL; |
@@ -1246,7 +1391,7 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
1246 | break; | 1391 | break; |
1247 | case KVM_DEV_FLIC_CLEAR_IRQS: | 1392 | case KVM_DEV_FLIC_CLEAR_IRQS: |
1248 | r = 0; | 1393 | r = 0; |
1249 | clear_floating_interrupts(dev->kvm); | 1394 | kvm_s390_clear_float_irqs(dev->kvm); |
1250 | break; | 1395 | break; |
1251 | case KVM_DEV_FLIC_APF_ENABLE: | 1396 | case KVM_DEV_FLIC_APF_ENABLE: |
1252 | dev->kvm->arch.gmap->pfault_enabled = 1; | 1397 | dev->kvm->arch.gmap->pfault_enabled = 1; |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b3ecb8f5b6ce..b32c42cbc706 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | 11 | * Christian Borntraeger <borntraeger@de.ibm.com> |
12 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 12 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
13 | * Christian Ehrhardt <ehrhardt@de.ibm.com> | 13 | * Christian Ehrhardt <ehrhardt@de.ibm.com> |
14 | * Jason J. Herne <jjherne@us.ibm.com> | ||
14 | */ | 15 | */ |
15 | 16 | ||
16 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
@@ -51,6 +52,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
51 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, | 52 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, |
52 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, | 53 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, |
53 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, | 54 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, |
55 | { "instruction_stctl", VCPU_STAT(instruction_stctl) }, | ||
56 | { "instruction_stctg", VCPU_STAT(instruction_stctg) }, | ||
54 | { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, | 57 | { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, |
55 | { "deliver_external_call", VCPU_STAT(deliver_external_call) }, | 58 | { "deliver_external_call", VCPU_STAT(deliver_external_call) }, |
56 | { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, | 59 | { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, |
@@ -66,6 +69,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
66 | { "instruction_stpx", VCPU_STAT(instruction_stpx) }, | 69 | { "instruction_stpx", VCPU_STAT(instruction_stpx) }, |
67 | { "instruction_stap", VCPU_STAT(instruction_stap) }, | 70 | { "instruction_stap", VCPU_STAT(instruction_stap) }, |
68 | { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, | 71 | { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, |
72 | { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) }, | ||
69 | { "instruction_stsch", VCPU_STAT(instruction_stsch) }, | 73 | { "instruction_stsch", VCPU_STAT(instruction_stsch) }, |
70 | { "instruction_chsc", VCPU_STAT(instruction_chsc) }, | 74 | { "instruction_chsc", VCPU_STAT(instruction_chsc) }, |
71 | { "instruction_essa", VCPU_STAT(instruction_essa) }, | 75 | { "instruction_essa", VCPU_STAT(instruction_essa) }, |
@@ -90,7 +94,7 @@ unsigned long *vfacilities; | |||
90 | static struct gmap_notifier gmap_notifier; | 94 | static struct gmap_notifier gmap_notifier; |
91 | 95 | ||
92 | /* test availability of vfacility */ | 96 | /* test availability of vfacility */ |
93 | static inline int test_vfacility(unsigned long nr) | 97 | int test_vfacility(unsigned long nr) |
94 | { | 98 | { |
95 | return __test_facility(nr, (void *) vfacilities); | 99 | return __test_facility(nr, (void *) vfacilities); |
96 | } | 100 | } |
@@ -161,6 +165,7 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
161 | case KVM_CAP_IOEVENTFD: | 165 | case KVM_CAP_IOEVENTFD: |
162 | case KVM_CAP_DEVICE_CTRL: | 166 | case KVM_CAP_DEVICE_CTRL: |
163 | case KVM_CAP_ENABLE_CAP_VM: | 167 | case KVM_CAP_ENABLE_CAP_VM: |
168 | case KVM_CAP_VM_ATTRIBUTES: | ||
164 | r = 1; | 169 | r = 1; |
165 | break; | 170 | break; |
166 | case KVM_CAP_NR_VCPUS: | 171 | case KVM_CAP_NR_VCPUS: |
@@ -179,6 +184,25 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
179 | return r; | 184 | return r; |
180 | } | 185 | } |
181 | 186 | ||
187 | static void kvm_s390_sync_dirty_log(struct kvm *kvm, | ||
188 | struct kvm_memory_slot *memslot) | ||
189 | { | ||
190 | gfn_t cur_gfn, last_gfn; | ||
191 | unsigned long address; | ||
192 | struct gmap *gmap = kvm->arch.gmap; | ||
193 | |||
194 | down_read(&gmap->mm->mmap_sem); | ||
195 | /* Loop over all guest pages */ | ||
196 | last_gfn = memslot->base_gfn + memslot->npages; | ||
197 | for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) { | ||
198 | address = gfn_to_hva_memslot(memslot, cur_gfn); | ||
199 | |||
200 | if (gmap_test_and_clear_dirty(address, gmap)) | ||
201 | mark_page_dirty(kvm, cur_gfn); | ||
202 | } | ||
203 | up_read(&gmap->mm->mmap_sem); | ||
204 | } | ||
205 | |||
182 | /* Section: vm related */ | 206 | /* Section: vm related */ |
183 | /* | 207 | /* |
184 | * Get (and clear) the dirty memory log for a memory slot. | 208 | * Get (and clear) the dirty memory log for a memory slot. |
@@ -186,7 +210,36 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
186 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | 210 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
187 | struct kvm_dirty_log *log) | 211 | struct kvm_dirty_log *log) |
188 | { | 212 | { |
189 | return 0; | 213 | int r; |
214 | unsigned long n; | ||
215 | struct kvm_memory_slot *memslot; | ||
216 | int is_dirty = 0; | ||
217 | |||
218 | mutex_lock(&kvm->slots_lock); | ||
219 | |||
220 | r = -EINVAL; | ||
221 | if (log->slot >= KVM_USER_MEM_SLOTS) | ||
222 | goto out; | ||
223 | |||
224 | memslot = id_to_memslot(kvm->memslots, log->slot); | ||
225 | r = -ENOENT; | ||
226 | if (!memslot->dirty_bitmap) | ||
227 | goto out; | ||
228 | |||
229 | kvm_s390_sync_dirty_log(kvm, memslot); | ||
230 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | ||
231 | if (r) | ||
232 | goto out; | ||
233 | |||
234 | /* Clear the dirty log */ | ||
235 | if (is_dirty) { | ||
236 | n = kvm_dirty_bitmap_bytes(memslot); | ||
237 | memset(memslot->dirty_bitmap, 0, n); | ||
238 | } | ||
239 | r = 0; | ||
240 | out: | ||
241 | mutex_unlock(&kvm->slots_lock); | ||
242 | return r; | ||
190 | } | 243 | } |
191 | 244 | ||
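
With kvm_s390_sync_dirty_log() harvesting the gmap dirty bits into the memslot bitmap, the generic dirty-log ioctl becomes usable on s390. A minimal userspace sketch (vm_fd and the bitmap allocation are assumed, error handling elided):

    struct kvm_dirty_log log = {
            .slot         = 0,        /* assumed slot id */
            .dirty_bitmap = bitmap,   /* caller-allocated, one bit per page */
    };
    if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
            migrate_dirty_pages(bitmap);    /* hypothetical consumer */
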
192 | static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | 245 | static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) |
@@ -208,11 +261,86 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
208 | return r; | 261 | return r; |
209 | } | 262 | } |
210 | 263 | ||
264 | static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) | ||
265 | { | ||
266 | int ret; | ||
267 | unsigned int idx; | ||
268 | switch (attr->attr) { | ||
269 | case KVM_S390_VM_MEM_ENABLE_CMMA: | ||
270 | ret = -EBUSY; | ||
271 | mutex_lock(&kvm->lock); | ||
272 | if (atomic_read(&kvm->online_vcpus) == 0) { | ||
273 | kvm->arch.use_cmma = 1; | ||
274 | ret = 0; | ||
275 | } | ||
276 | mutex_unlock(&kvm->lock); | ||
277 | break; | ||
278 | case KVM_S390_VM_MEM_CLR_CMMA: | ||
279 | mutex_lock(&kvm->lock); | ||
280 | idx = srcu_read_lock(&kvm->srcu); | ||
281 | page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false); | ||
282 | srcu_read_unlock(&kvm->srcu, idx); | ||
283 | mutex_unlock(&kvm->lock); | ||
284 | ret = 0; | ||
285 | break; | ||
286 | default: | ||
287 | ret = -ENXIO; | ||
288 | break; | ||
289 | } | ||
290 | return ret; | ||
291 | } | ||
292 | |||
293 | static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) | ||
294 | { | ||
295 | int ret; | ||
296 | |||
297 | switch (attr->group) { | ||
298 | case KVM_S390_VM_MEM_CTRL: | ||
299 | ret = kvm_s390_mem_control(kvm, attr); | ||
300 | break; | ||
301 | default: | ||
302 | ret = -ENXIO; | ||
303 | break; | ||
304 | } | ||
305 | |||
306 | return ret; | ||
307 | } | ||
308 | |||
309 | static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) | ||
310 | { | ||
311 | return -ENXIO; | ||
312 | } | ||
313 | |||
314 | static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) | ||
315 | { | ||
316 | int ret; | ||
317 | |||
318 | switch (attr->group) { | ||
319 | case KVM_S390_VM_MEM_CTRL: | ||
320 | switch (attr->attr) { | ||
321 | case KVM_S390_VM_MEM_ENABLE_CMMA: | ||
322 | case KVM_S390_VM_MEM_CLR_CMMA: | ||
323 | ret = 0; | ||
324 | break; | ||
325 | default: | ||
326 | ret = -ENXIO; | ||
327 | break; | ||
328 | } | ||
329 | break; | ||
330 | default: | ||
331 | ret = -ENXIO; | ||
332 | break; | ||
333 | } | ||
334 | |||
335 | return ret; | ||
336 | } | ||
337 | |||
211 | long kvm_arch_vm_ioctl(struct file *filp, | 338 | long kvm_arch_vm_ioctl(struct file *filp, |
212 | unsigned int ioctl, unsigned long arg) | 339 | unsigned int ioctl, unsigned long arg) |
213 | { | 340 | { |
214 | struct kvm *kvm = filp->private_data; | 341 | struct kvm *kvm = filp->private_data; |
215 | void __user *argp = (void __user *)arg; | 342 | void __user *argp = (void __user *)arg; |
343 | struct kvm_device_attr attr; | ||
216 | int r; | 344 | int r; |
217 | 345 | ||
218 | switch (ioctl) { | 346 | switch (ioctl) { |
@@ -245,6 +373,27 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
245 | } | 373 | } |
246 | break; | 374 | break; |
247 | } | 375 | } |
376 | case KVM_SET_DEVICE_ATTR: { | ||
377 | r = -EFAULT; | ||
378 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) | ||
379 | break; | ||
380 | r = kvm_s390_vm_set_attr(kvm, &attr); | ||
381 | break; | ||
382 | } | ||
383 | case KVM_GET_DEVICE_ATTR: { | ||
384 | r = -EFAULT; | ||
385 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) | ||
386 | break; | ||
387 | r = kvm_s390_vm_get_attr(kvm, &attr); | ||
388 | break; | ||
389 | } | ||
390 | case KVM_HAS_DEVICE_ATTR: { | ||
391 | r = -EFAULT; | ||
392 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) | ||
393 | break; | ||
394 | r = kvm_s390_vm_has_attr(kvm, &attr); | ||
395 | break; | ||
396 | } | ||
248 | default: | 397 | default: |
249 | r = -ENOTTY; | 398 | r = -ENOTTY; |
250 | } | 399 | } |
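
Seen from userspace, the three new cases make the VM file descriptor accept the same attribute ioctls as device fds. A minimal sketch enabling CMMA, which has to happen before the first vcpu exists (vm_fd assumed, error handling elided):

    struct kvm_device_attr attr = {
            .group = KVM_S390_VM_MEM_CTRL,
            .attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
    };
    if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
            rc = ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    /* -EBUSY from SET means vcpus already exist, see kvm_s390_mem_control() */
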
@@ -291,6 +440,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
291 | 440 | ||
292 | spin_lock_init(&kvm->arch.float_int.lock); | 441 | spin_lock_init(&kvm->arch.float_int.lock); |
293 | INIT_LIST_HEAD(&kvm->arch.float_int.list); | 442 | INIT_LIST_HEAD(&kvm->arch.float_int.list); |
443 | init_waitqueue_head(&kvm->arch.ipte_wq); | ||
294 | 444 | ||
295 | debug_register_view(kvm->arch.dbf, &debug_sprintf_view); | 445 | debug_register_view(kvm->arch.dbf, &debug_sprintf_view); |
296 | VM_EVENT(kvm, 3, "%s", "vm created"); | 446 | VM_EVENT(kvm, 3, "%s", "vm created"); |
@@ -321,6 +471,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
321 | { | 471 | { |
322 | VCPU_EVENT(vcpu, 3, "%s", "free cpu"); | 472 | VCPU_EVENT(vcpu, 3, "%s", "free cpu"); |
323 | trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); | 473 | trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); |
474 | kvm_s390_clear_local_irqs(vcpu); | ||
324 | kvm_clear_async_pf_completion_queue(vcpu); | 475 | kvm_clear_async_pf_completion_queue(vcpu); |
325 | if (!kvm_is_ucontrol(vcpu->kvm)) { | 476 | if (!kvm_is_ucontrol(vcpu->kvm)) { |
326 | clear_bit(63 - vcpu->vcpu_id, | 477 | clear_bit(63 - vcpu->vcpu_id, |
@@ -334,9 +485,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
334 | if (kvm_is_ucontrol(vcpu->kvm)) | 485 | if (kvm_is_ucontrol(vcpu->kvm)) |
335 | gmap_free(vcpu->arch.gmap); | 486 | gmap_free(vcpu->arch.gmap); |
336 | 487 | ||
337 | if (vcpu->arch.sie_block->cbrlo) | 488 | if (kvm_s390_cmma_enabled(vcpu->kvm)) |
338 | __free_page(__pfn_to_page( | 489 | kvm_s390_vcpu_unsetup_cmma(vcpu); |
339 | vcpu->arch.sie_block->cbrlo >> PAGE_SHIFT)); | ||
340 | free_page((unsigned long)(vcpu->arch.sie_block)); | 490 | free_page((unsigned long)(vcpu->arch.sie_block)); |
341 | 491 | ||
342 | kvm_vcpu_uninit(vcpu); | 492 | kvm_vcpu_uninit(vcpu); |
@@ -371,6 +521,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
371 | if (!kvm_is_ucontrol(kvm)) | 521 | if (!kvm_is_ucontrol(kvm)) |
372 | gmap_free(kvm->arch.gmap); | 522 | gmap_free(kvm->arch.gmap); |
373 | kvm_s390_destroy_adapters(kvm); | 523 | kvm_s390_destroy_adapters(kvm); |
524 | kvm_s390_clear_float_irqs(kvm); | ||
374 | } | 525 | } |
375 | 526 | ||
376 | /* Section: vcpu related */ | 527 | /* Section: vcpu related */ |
@@ -450,9 +601,26 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | |||
450 | return 0; | 601 | return 0; |
451 | } | 602 | } |
452 | 603 | ||
604 | void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) | ||
605 | { | ||
606 | free_page(vcpu->arch.sie_block->cbrlo); | ||
607 | vcpu->arch.sie_block->cbrlo = 0; | ||
608 | } | ||
609 | |||
610 | int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) | ||
611 | { | ||
612 | vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); | ||
613 | if (!vcpu->arch.sie_block->cbrlo) | ||
614 | return -ENOMEM; | ||
615 | |||
616 | vcpu->arch.sie_block->ecb2 |= 0x80; | ||
617 | vcpu->arch.sie_block->ecb2 &= ~0x08; | ||
618 | return 0; | ||
619 | } | ||
620 | |||
453 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 621 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
454 | { | 622 | { |
455 | struct page *cbrl; | 623 | int rc = 0; |
456 | 624 | ||
457 | atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | | 625 | atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | |
458 | CPUSTAT_SM | | 626 | CPUSTAT_SM | |
@@ -463,15 +631,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
463 | vcpu->arch.sie_block->ecb |= 0x10; | 631 | vcpu->arch.sie_block->ecb |= 0x10; |
464 | 632 | ||
465 | vcpu->arch.sie_block->ecb2 = 8; | 633 | vcpu->arch.sie_block->ecb2 = 8; |
466 | vcpu->arch.sie_block->eca = 0xC1002001U; | 634 | vcpu->arch.sie_block->eca = 0xC1002000U; |
635 | if (sclp_has_siif()) | ||
636 | vcpu->arch.sie_block->eca |= 1; | ||
467 | vcpu->arch.sie_block->fac = (int) (long) vfacilities; | 637 | vcpu->arch.sie_block->fac = (int) (long) vfacilities; |
468 | if (kvm_enabled_cmma()) { | 638 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; |
469 | cbrl = alloc_page(GFP_KERNEL | __GFP_ZERO); | 639 | if (kvm_s390_cmma_enabled(vcpu->kvm)) { |
470 | if (cbrl) { | 640 | rc = kvm_s390_vcpu_setup_cmma(vcpu); |
471 | vcpu->arch.sie_block->ecb2 |= 0x80; | 641 | if (rc) |
472 | vcpu->arch.sie_block->ecb2 &= ~0x08; | 642 | return rc; |
473 | vcpu->arch.sie_block->cbrlo = page_to_phys(cbrl); | ||
474 | } | ||
475 | } | 643 | } |
476 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 644 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
477 | tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, | 645 | tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, |
@@ -479,7 +647,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
479 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; | 647 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; |
480 | get_cpu_id(&vcpu->arch.cpu_id); | 648 | get_cpu_id(&vcpu->arch.cpu_id); |
481 | vcpu->arch.cpu_id.version = 0xff; | 649 | vcpu->arch.cpu_id.version = 0xff; |
482 | return 0; | 650 | return rc; |
483 | } | 651 | } |
484 | 652 | ||
485 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | 653 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, |
@@ -768,10 +936,40 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | |||
768 | return -EINVAL; /* not implemented yet */ | 936 | return -EINVAL; /* not implemented yet */ |
769 | } | 937 | } |
770 | 938 | ||
939 | #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ | ||
940 | KVM_GUESTDBG_USE_HW_BP | \ | ||
941 | KVM_GUESTDBG_ENABLE) | ||
942 | |||
771 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 943 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
772 | struct kvm_guest_debug *dbg) | 944 | struct kvm_guest_debug *dbg) |
773 | { | 945 | { |
774 | return -EINVAL; /* not implemented yet */ | 946 | int rc = 0; |
947 | |||
948 | vcpu->guest_debug = 0; | ||
949 | kvm_s390_clear_bp_data(vcpu); | ||
950 | |||
951 | if (vcpu->guest_debug & ~VALID_GUESTDBG_FLAGS) | ||
952 | return -EINVAL; | ||
953 | |||
954 | if (dbg->control & KVM_GUESTDBG_ENABLE) { | ||
955 | vcpu->guest_debug = dbg->control; | ||
956 | /* enforce guest PER */ | ||
957 | atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); | ||
958 | |||
959 | if (dbg->control & KVM_GUESTDBG_USE_HW_BP) | ||
960 | rc = kvm_s390_import_bp_data(vcpu, dbg); | ||
961 | } else { | ||
962 | atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); | ||
963 | vcpu->arch.guestdbg.last_bp = 0; | ||
964 | } | ||
965 | |||
966 | if (rc) { | ||
967 | vcpu->guest_debug = 0; | ||
968 | kvm_s390_clear_bp_data(vcpu); | ||
969 | atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); | ||
970 | } | ||
971 | |||
972 | return rc; | ||
775 | } | 973 | } |
776 | 974 | ||
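
Userspace drives this through the existing KVM_SET_GUEST_DEBUG vcpu ioctl; only the flags in VALID_GUESTDBG_FLAGS are honoured. A hedged sketch (vcpu_fd and the run loop are assumed):

    struct kvm_guest_debug dbg = {
            .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
    };
    if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) == 0)
            run_vcpu_loop();    /* hypothetical; KVM_RUN then reports
                                 * KVM_EXIT_DEBUG, whitelisted further down */
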
777 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 975 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
@@ -786,6 +984,18 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |||
786 | return -EINVAL; /* not implemented yet */ | 984 | return -EINVAL; /* not implemented yet */ |
787 | } | 985 | } |
788 | 986 | ||
987 | bool kvm_s390_cmma_enabled(struct kvm *kvm) | ||
988 | { | ||
989 | if (!MACHINE_IS_LPAR) | ||
990 | return false; | ||
991 | /* only enable for z10 and later */ | ||
992 | if (!MACHINE_HAS_EDAT1) | ||
993 | return false; | ||
994 | if (!kvm->arch.use_cmma) | ||
995 | return false; | ||
996 | return true; | ||
997 | } | ||
998 | |||
789 | static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) | 999 | static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) |
790 | { | 1000 | { |
791 | /* | 1001 | /* |
@@ -882,8 +1092,9 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) | |||
882 | if (!vcpu->arch.gmap->pfault_enabled) | 1092 | if (!vcpu->arch.gmap->pfault_enabled) |
883 | return 0; | 1093 | return 0; |
884 | 1094 | ||
885 | hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap); | 1095 | hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); |
886 | if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8)) | 1096 | hva += current->thread.gmap_addr & ~PAGE_MASK; |
1097 | if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) | ||
887 | return 0; | 1098 | return 0; |
888 | 1099 | ||
889 | rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); | 1100 | rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); |
@@ -916,6 +1127,11 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) | |||
916 | if (rc) | 1127 | if (rc) |
917 | return rc; | 1128 | return rc; |
918 | 1129 | ||
1130 | if (guestdbg_enabled(vcpu)) { | ||
1131 | kvm_s390_backup_guest_per_regs(vcpu); | ||
1132 | kvm_s390_patch_guest_per_regs(vcpu); | ||
1133 | } | ||
1134 | |||
919 | vcpu->arch.sie_block->icptcode = 0; | 1135 | vcpu->arch.sie_block->icptcode = 0; |
920 | cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); | 1136 | cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); |
921 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); | 1137 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); |
@@ -932,6 +1148,9 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | |||
932 | vcpu->arch.sie_block->icptcode); | 1148 | vcpu->arch.sie_block->icptcode); |
933 | trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); | 1149 | trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); |
934 | 1150 | ||
1151 | if (guestdbg_enabled(vcpu)) | ||
1152 | kvm_s390_restore_guest_per_regs(vcpu); | ||
1153 | |||
935 | if (exit_reason >= 0) { | 1154 | if (exit_reason >= 0) { |
936 | rc = 0; | 1155 | rc = 0; |
937 | } else if (kvm_is_ucontrol(vcpu->kvm)) { | 1156 | } else if (kvm_is_ucontrol(vcpu->kvm)) { |
@@ -968,16 +1187,6 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | |||
968 | return rc; | 1187 | return rc; |
969 | } | 1188 | } |
970 | 1189 | ||
971 | bool kvm_enabled_cmma(void) | ||
972 | { | ||
973 | if (!MACHINE_IS_LPAR) | ||
974 | return false; | ||
975 | /* only enable for z10 and later */ | ||
976 | if (!MACHINE_HAS_EDAT1) | ||
977 | return false; | ||
978 | return true; | ||
979 | } | ||
980 | |||
981 | static int __vcpu_run(struct kvm_vcpu *vcpu) | 1190 | static int __vcpu_run(struct kvm_vcpu *vcpu) |
982 | { | 1191 | { |
983 | int rc, exit_reason; | 1192 | int rc, exit_reason; |
@@ -1007,7 +1216,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
1007 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | 1216 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
1008 | 1217 | ||
1009 | rc = vcpu_post_run(vcpu, exit_reason); | 1218 | rc = vcpu_post_run(vcpu, exit_reason); |
1010 | } while (!signal_pending(current) && !rc); | 1219 | } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); |
1011 | 1220 | ||
1012 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | 1221 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); |
1013 | return rc; | 1222 | return rc; |
@@ -1018,6 +1227,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1018 | int rc; | 1227 | int rc; |
1019 | sigset_t sigsaved; | 1228 | sigset_t sigsaved; |
1020 | 1229 | ||
1230 | if (guestdbg_exit_pending(vcpu)) { | ||
1231 | kvm_s390_prepare_debug_exit(vcpu); | ||
1232 | return 0; | ||
1233 | } | ||
1234 | |||
1021 | if (vcpu->sigset_active) | 1235 | if (vcpu->sigset_active) |
1022 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 1236 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
1023 | 1237 | ||
@@ -1030,6 +1244,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1030 | case KVM_EXIT_S390_RESET: | 1244 | case KVM_EXIT_S390_RESET: |
1031 | case KVM_EXIT_S390_UCONTROL: | 1245 | case KVM_EXIT_S390_UCONTROL: |
1032 | case KVM_EXIT_S390_TSCH: | 1246 | case KVM_EXIT_S390_TSCH: |
1247 | case KVM_EXIT_DEBUG: | ||
1033 | break; | 1248 | break; |
1034 | default: | 1249 | default: |
1035 | BUG(); | 1250 | BUG(); |
@@ -1055,6 +1270,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1055 | rc = -EINTR; | 1270 | rc = -EINTR; |
1056 | } | 1271 | } |
1057 | 1272 | ||
1273 | if (guestdbg_exit_pending(vcpu) && !rc) { | ||
1274 | kvm_s390_prepare_debug_exit(vcpu); | ||
1275 | rc = 0; | ||
1276 | } | ||
1277 | |||
1058 | if (rc == -EOPNOTSUPP) { | 1278 | if (rc == -EOPNOTSUPP) { |
1059 | /* intercept cannot be handled in-kernel, prepare kvm-run */ | 1279 | /* intercept cannot be handled in-kernel, prepare kvm-run */ |
1060 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; | 1280 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; |
@@ -1082,83 +1302,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1082 | return rc; | 1302 | return rc; |
1083 | } | 1303 | } |
1084 | 1304 | ||
1085 | static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from, | ||
1086 | unsigned long n, int prefix) | ||
1087 | { | ||
1088 | if (prefix) | ||
1089 | return copy_to_guest(vcpu, guestdest, from, n); | ||
1090 | else | ||
1091 | return copy_to_guest_absolute(vcpu, guestdest, from, n); | ||
1092 | } | ||
1093 | |||
1094 | /* | 1305 | /* |
1095 | * store status at address | 1306 | * store status at address |
1096 | * we have two special cases: | 1307 | * we have two special cases: |
1097 | * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit | 1308 | * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit |
1098 | * KVM_S390_STORE_STATUS_PREFIXED: -> prefix | 1309 | * KVM_S390_STORE_STATUS_PREFIXED: -> prefix |
1099 | */ | 1310 | */ |
1100 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr) | 1311 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) |
1101 | { | 1312 | { |
1102 | unsigned char archmode = 1; | 1313 | unsigned char archmode = 1; |
1103 | int prefix; | ||
1104 | u64 clkcomp; | 1314 | u64 clkcomp; |
1315 | int rc; | ||
1105 | 1316 | ||
1106 | if (addr == KVM_S390_STORE_STATUS_NOADDR) { | 1317 | if (gpa == KVM_S390_STORE_STATUS_NOADDR) { |
1107 | if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1)) | 1318 | if (write_guest_abs(vcpu, 163, &archmode, 1)) |
1108 | return -EFAULT; | 1319 | return -EFAULT; |
1109 | addr = SAVE_AREA_BASE; | 1320 | gpa = SAVE_AREA_BASE; |
1110 | prefix = 0; | 1321 | } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { |
1111 | } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) { | 1322 | if (write_guest_real(vcpu, 163, &archmode, 1)) |
1112 | if (copy_to_guest(vcpu, 163ul, &archmode, 1)) | ||
1113 | return -EFAULT; | 1323 | return -EFAULT; |
1114 | addr = SAVE_AREA_BASE; | 1324 | gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); |
1115 | prefix = 1; | 1325 | } |
1116 | } else | 1326 | rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), |
1117 | prefix = 0; | 1327 | vcpu->arch.guest_fpregs.fprs, 128); |
1118 | 1328 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), | |
1119 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), | 1329 | vcpu->run->s.regs.gprs, 128); |
1120 | vcpu->arch.guest_fpregs.fprs, 128, prefix)) | 1330 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), |
1121 | return -EFAULT; | 1331 | &vcpu->arch.sie_block->gpsw, 16); |
1122 | 1332 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), | |
1123 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs), | 1333 | &vcpu->arch.sie_block->prefix, 4); |
1124 | vcpu->run->s.regs.gprs, 128, prefix)) | 1334 | rc |= write_guest_abs(vcpu, |
1125 | return -EFAULT; | 1335 | gpa + offsetof(struct save_area, fp_ctrl_reg), |
1126 | 1336 | &vcpu->arch.guest_fpregs.fpc, 4); | |
1127 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw), | 1337 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), |
1128 | &vcpu->arch.sie_block->gpsw, 16, prefix)) | 1338 | &vcpu->arch.sie_block->todpr, 4); |
1129 | return -EFAULT; | 1339 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), |
1130 | 1340 | &vcpu->arch.sie_block->cputm, 8); | |
1131 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg), | ||
1132 | &vcpu->arch.sie_block->prefix, 4, prefix)) | ||
1133 | return -EFAULT; | ||
1134 | |||
1135 | if (__guestcopy(vcpu, | ||
1136 | addr + offsetof(struct save_area, fp_ctrl_reg), | ||
1137 | &vcpu->arch.guest_fpregs.fpc, 4, prefix)) | ||
1138 | return -EFAULT; | ||
1139 | |||
1140 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg), | ||
1141 | &vcpu->arch.sie_block->todpr, 4, prefix)) | ||
1142 | return -EFAULT; | ||
1143 | |||
1144 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer), | ||
1145 | &vcpu->arch.sie_block->cputm, 8, prefix)) | ||
1146 | return -EFAULT; | ||
1147 | |||
1148 | clkcomp = vcpu->arch.sie_block->ckc >> 8; | 1341 | clkcomp = vcpu->arch.sie_block->ckc >> 8; |
1149 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp), | 1342 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), |
1150 | &clkcomp, 8, prefix)) | 1343 | &clkcomp, 8); |
1151 | return -EFAULT; | 1344 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), |
1152 | 1345 | &vcpu->run->s.regs.acrs, 64); | |
1153 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs), | 1346 | rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), |
1154 | &vcpu->run->s.regs.acrs, 64, prefix)) | 1347 | &vcpu->arch.sie_block->gcr, 128); |
1155 | return -EFAULT; | 1348 | return rc ? -EFAULT : 0; |
1156 | |||
1157 | if (__guestcopy(vcpu, | ||
1158 | addr + offsetof(struct save_area, ctrl_regs), | ||
1159 | &vcpu->arch.sie_block->gcr, 128, prefix)) | ||
1160 | return -EFAULT; | ||
1161 | return 0; | ||
1162 | } | 1349 | } |
1163 | 1350 | ||
1164 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | 1351 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 3c1e2274d9ea..9b5680d1f6cc 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -28,7 +28,6 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); | |||
28 | 28 | ||
29 | /* Transactional Memory Execution related macros */ | 29 | /* Transactional Memory Execution related macros */ |
30 | #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) | 30 | #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) |
31 | #define TDB_ADDR 0x1800UL | ||
32 | #define TDB_FORMAT1 1 | 31 | #define TDB_FORMAT1 1 |
33 | #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) | 32 | #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) |
34 | 33 | ||
@@ -130,6 +129,7 @@ void kvm_s390_tasklet(unsigned long parm); | |||
130 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); | 129 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); |
131 | void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); | 130 | void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); |
132 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); | 131 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); |
132 | void kvm_s390_clear_float_irqs(struct kvm *kvm); | ||
133 | int __must_check kvm_s390_inject_vm(struct kvm *kvm, | 133 | int __must_check kvm_s390_inject_vm(struct kvm *kvm, |
134 | struct kvm_s390_interrupt *s390int); | 134 | struct kvm_s390_interrupt *s390int); |
135 | int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | 135 | int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, |
@@ -137,6 +137,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
137 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); | 137 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
138 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 138 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
139 | u64 cr6, u64 schid); | 139 | u64 cr6, u64 schid); |
140 | void kvm_s390_reinject_io_int(struct kvm *kvm, | ||
141 | struct kvm_s390_interrupt_info *inti); | ||
140 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); | 142 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); |
141 | 143 | ||
142 | /* implemented in priv.c */ | 144 | /* implemented in priv.c */ |
@@ -145,6 +147,7 @@ int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); | |||
145 | int kvm_s390_handle_01(struct kvm_vcpu *vcpu); | 147 | int kvm_s390_handle_01(struct kvm_vcpu *vcpu); |
146 | int kvm_s390_handle_b9(struct kvm_vcpu *vcpu); | 148 | int kvm_s390_handle_b9(struct kvm_vcpu *vcpu); |
147 | int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu); | 149 | int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu); |
150 | int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu); | ||
148 | int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu); | 151 | int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu); |
149 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu); | 152 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu); |
150 | 153 | ||
@@ -158,14 +161,64 @@ void s390_vcpu_block(struct kvm_vcpu *vcpu); | |||
158 | void s390_vcpu_unblock(struct kvm_vcpu *vcpu); | 161 | void s390_vcpu_unblock(struct kvm_vcpu *vcpu); |
159 | void exit_sie(struct kvm_vcpu *vcpu); | 162 | void exit_sie(struct kvm_vcpu *vcpu); |
160 | void exit_sie_sync(struct kvm_vcpu *vcpu); | 163 | void exit_sie_sync(struct kvm_vcpu *vcpu); |
161 | /* are we going to support cmma? */ | 164 | int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); |
162 | bool kvm_enabled_cmma(void); | 165 | void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); |
166 | /* is cmma enabled */ | ||
167 | bool kvm_s390_cmma_enabled(struct kvm *kvm); | ||
168 | int test_vfacility(unsigned long nr); | ||
169 | |||
163 | /* implemented in diag.c */ | 170 | /* implemented in diag.c */ |
164 | int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); | 171 | int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); |
172 | /* implemented in interrupt.c */ | ||
173 | int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, | ||
174 | struct kvm_s390_pgm_info *pgm_info); | ||
175 | |||
176 | /** | ||
177 | * kvm_s390_inject_prog_cond - conditionally inject a program check | ||
178 | * @vcpu: virtual cpu | ||
179 | * @rc: original return/error code | ||
180 | * | ||
181 | * This function is supposed to be used after regular guest access functions | ||
182 | * have failed, to conditionally inject a program check to a vcpu. The typical | ||
183 | * pattern would look like | ||
184 | * | ||
185 | * rc = write_guest(vcpu, addr, data, len); | ||
186 | * if (rc) | ||
187 | * return kvm_s390_inject_prog_cond(vcpu, rc); | ||
188 | * | ||
189 | * A negative return code from guest access functions implies an internal error | ||
190 | * such as out of memory. In these cases no program check should be injected | ||
191 | * to the guest. | ||
192 | * A positive value implies that an exception happened while accessing a guest's | ||
193 | * memory. In this case all data belonging to the corresponding program check | ||
194 | * has been stored in vcpu->arch.pgm and can be injected with | ||
195 | * kvm_s390_inject_prog_irq(). | ||
196 | * | ||
197 | * Returns: - the original @rc value if @rc was negative (internal error) | ||
198 | * - zero if @rc was already zero | ||
199 | * - zero or error code from injecting if @rc was positive | ||
200 | * (program check injected to @vcpu) | ||
201 | */ | ||
202 | static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc) | ||
203 | { | ||
204 | if (rc <= 0) | ||
205 | return rc; | ||
206 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | ||
207 | } | ||
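
The convention documented above can be shown end to end with a minimal intercept handler. This fragment is an illustrative sketch assembled from the patterns used throughout this patch; handle_example and the loaded value are hypothetical, while the helpers are the ones introduced here:

    /* Sketch: canonical use of the guest access rc convention. */
    static int handle_example(struct kvm_vcpu *vcpu)
    {
            u64 ga = kvm_s390_get_base_disp_s(vcpu);  /* guest address */
            u32 val;
            int rc;

            rc = read_guest(vcpu, ga, &val, sizeof(val));
            /*
             * rc < 0:  internal error (e.g. -ENOMEM), returned unchanged
             * rc == 0: success, kvm_s390_inject_prog_cond() returns 0
             * rc > 0:  guest exception; vcpu->arch.pgm is injected via
             *          kvm_s390_inject_prog_irq()
             */
            if (rc)
                    return kvm_s390_inject_prog_cond(vcpu, rc);
            return 0;
    }
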
165 | 208 | ||
166 | /* implemented in interrupt.c */ | 209 | /* implemented in interrupt.c */ |
167 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | 210 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); |
168 | int psw_extint_disabled(struct kvm_vcpu *vcpu); | 211 | int psw_extint_disabled(struct kvm_vcpu *vcpu); |
169 | void kvm_s390_destroy_adapters(struct kvm *kvm); | 212 | void kvm_s390_destroy_adapters(struct kvm *kvm); |
170 | 213 | ||
214 | /* implemented in guestdbg.c */ | ||
215 | void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); | ||
216 | void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu); | ||
217 | void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu); | ||
218 | int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu, | ||
219 | struct kvm_guest_debug *dbg); | ||
220 | void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu); | ||
221 | void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu); | ||
222 | void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu); | ||
223 | |||
171 | #endif | 224 | #endif |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 476e9e218f43..27f9051a78f8 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -35,8 +35,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
35 | { | 35 | { |
36 | struct kvm_vcpu *cpup; | 36 | struct kvm_vcpu *cpup; |
37 | s64 hostclk, val; | 37 | s64 hostclk, val; |
38 | int i, rc; | ||
38 | u64 op2; | 39 | u64 op2; |
39 | int i; | ||
40 | 40 | ||
41 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 41 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
42 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 42 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
@@ -44,8 +44,9 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
44 | op2 = kvm_s390_get_base_disp_s(vcpu); | 44 | op2 = kvm_s390_get_base_disp_s(vcpu); |
45 | if (op2 & 7) /* Operand must be on a doubleword boundary */ | 45 | if (op2 & 7) /* Operand must be on a doubleword boundary */ |
46 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 46 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
47 | if (get_guest(vcpu, val, (u64 __user *) op2)) | 47 | rc = read_guest(vcpu, op2, &val, sizeof(val)); |
48 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 48 | if (rc) |
49 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
49 | 50 | ||
50 | if (store_tod_clock(&hostclk)) { | 51 | if (store_tod_clock(&hostclk)) { |
51 | kvm_s390_set_psw_cc(vcpu, 3); | 52 | kvm_s390_set_psw_cc(vcpu, 3); |
@@ -65,8 +66,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
65 | static int handle_set_prefix(struct kvm_vcpu *vcpu) | 66 | static int handle_set_prefix(struct kvm_vcpu *vcpu) |
66 | { | 67 | { |
67 | u64 operand2; | 68 | u64 operand2; |
68 | u32 address = 0; | 69 | u32 address; |
69 | u8 tmp; | 70 | int rc; |
70 | 71 | ||
71 | vcpu->stat.instruction_spx++; | 72 | vcpu->stat.instruction_spx++; |
72 | 73 | ||
@@ -80,14 +81,18 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) | |||
80 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 81 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
81 | 82 | ||
82 | /* get the value */ | 83 | /* get the value */ |
83 | if (get_guest(vcpu, address, (u32 __user *) operand2)) | 84 | rc = read_guest(vcpu, operand2, &address, sizeof(address)); |
84 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 85 | if (rc) |
86 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
85 | 87 | ||
86 | address = address & 0x7fffe000u; | 88 | address &= 0x7fffe000u; |
87 | 89 | ||
88 | /* make sure that the new value is valid memory */ | 90 | /* |
89 | if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || | 91 | * Make sure the new value is valid memory. We only need to check the |
90 | (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) | 92 | * first page, since address is 8k aligned and memory pieces are always |
93 | * at least 1MB aligned and have at least a size of 1MB. | ||
94 | */ | ||
95 | if (kvm_is_error_gpa(vcpu->kvm, address)) | ||
91 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 96 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
92 | 97 | ||
93 | kvm_s390_set_prefix(vcpu, address); | 98 | kvm_s390_set_prefix(vcpu, address); |
@@ -101,6 +106,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
101 | { | 106 | { |
102 | u64 operand2; | 107 | u64 operand2; |
103 | u32 address; | 108 | u32 address; |
109 | int rc; | ||
104 | 110 | ||
105 | vcpu->stat.instruction_stpx++; | 111 | vcpu->stat.instruction_stpx++; |
106 | 112 | ||
@@ -117,8 +123,9 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
117 | address = address & 0x7fffe000u; | 123 | address = address & 0x7fffe000u; |
118 | 124 | ||
119 | /* get the value */ | 125 | /* get the value */ |
120 | if (put_guest(vcpu, address, (u32 __user *)operand2)) | 126 | rc = write_guest(vcpu, operand2, &address, sizeof(address)); |
121 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 127 | if (rc) |
128 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
122 | 129 | ||
123 | VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); | 130 | VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); |
124 | trace_kvm_s390_handle_prefix(vcpu, 0, address); | 131 | trace_kvm_s390_handle_prefix(vcpu, 0, address); |
@@ -127,28 +134,44 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
127 | 134 | ||
128 | static int handle_store_cpu_address(struct kvm_vcpu *vcpu) | 135 | static int handle_store_cpu_address(struct kvm_vcpu *vcpu) |
129 | { | 136 | { |
130 | u64 useraddr; | 137 | u16 vcpu_id = vcpu->vcpu_id; |
138 | u64 ga; | ||
139 | int rc; | ||
131 | 140 | ||
132 | vcpu->stat.instruction_stap++; | 141 | vcpu->stat.instruction_stap++; |
133 | 142 | ||
134 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 143 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
135 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 144 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
136 | 145 | ||
137 | useraddr = kvm_s390_get_base_disp_s(vcpu); | 146 | ga = kvm_s390_get_base_disp_s(vcpu); |
138 | 147 | ||
139 | if (useraddr & 1) | 148 | if (ga & 1) |
140 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 149 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
141 | 150 | ||
142 | if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr)) | 151 | rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); |
143 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 152 | if (rc) |
153 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
144 | 154 | ||
145 | VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); | 155 | VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); |
146 | trace_kvm_s390_handle_stap(vcpu, useraddr); | 156 | trace_kvm_s390_handle_stap(vcpu, ga); |
147 | return 0; | 157 | return 0; |
148 | } | 158 | } |
149 | 159 | ||
160 | static void __skey_check_enable(struct kvm_vcpu *vcpu) | ||
161 | { | ||
162 | if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) | ||
163 | return; | ||
164 | |||
165 | s390_enable_skey(); | ||
166 | trace_kvm_s390_skey_related_inst(vcpu); | ||
167 | vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); | ||
168 | } | ||
169 | |||
170 | |||
150 | static int handle_skey(struct kvm_vcpu *vcpu) | 171 | static int handle_skey(struct kvm_vcpu *vcpu) |
151 | { | 172 | { |
173 | __skey_check_enable(vcpu); | ||
174 | |||
152 | vcpu->stat.instruction_storage_key++; | 175 | vcpu->stat.instruction_storage_key++; |
153 | 176 | ||
154 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 177 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
@@ -160,9 +183,21 @@ static int handle_skey(struct kvm_vcpu *vcpu) | |||
160 | return 0; | 183 | return 0; |
161 | } | 184 | } |
162 | 185 | ||
186 | static int handle_ipte_interlock(struct kvm_vcpu *vcpu) | ||
187 | { | ||
188 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
189 | |||
190 | vcpu->stat.instruction_ipte_interlock++; | ||
191 | if (psw_bits(*psw).p) | ||
192 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | ||
193 | wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); | ||
194 | psw->addr = __rewind_psw(*psw, 4); | ||
195 | VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); | ||
196 | return 0; | ||
197 | } | ||
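
The interlock is resolved by nullification: the PSW already points past the intercepted 4-byte opcode, so stepping it back makes the guest re-execute the instruction on the next SIE entry, once the IPTE lock has been released. A sketch of the retry step, assuming s390's __rewind_psw() masks the resulting address to the current addressing mode:

    /*
     * Step the PSW back over the 4-byte instruction so the guest
     * retries it; the wait_event() above has already ensured the
     * IPTE lock is no longer held at this point.
     */
    psw_t *psw = &vcpu->arch.sie_block->gpsw;
    psw->addr = __rewind_psw(*psw, 4);
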
198 | |||
163 | static int handle_test_block(struct kvm_vcpu *vcpu) | 199 | static int handle_test_block(struct kvm_vcpu *vcpu) |
164 | { | 200 | { |
165 | unsigned long hva; | ||
166 | gpa_t addr; | 201 | gpa_t addr; |
167 | int reg2; | 202 | int reg2; |
168 | 203 | ||
@@ -173,14 +208,13 @@ static int handle_test_block(struct kvm_vcpu *vcpu) | |||
173 | addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; | 208 | addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; |
174 | addr = kvm_s390_real_to_abs(vcpu, addr); | 209 | addr = kvm_s390_real_to_abs(vcpu, addr); |
175 | 210 | ||
176 | hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr)); | 211 | if (kvm_is_error_gpa(vcpu->kvm, addr)) |
177 | if (kvm_is_error_hva(hva)) | ||
178 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 212 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
179 | /* | 213 | /* |
180 | * We don't expect errors on modern systems, and do not care | 214 | * We don't expect errors on modern systems, and do not care |
181 | * about storage keys (yet), so let's just clear the page. | 215 | * about storage keys (yet), so let's just clear the page. |
182 | */ | 216 | */ |
183 | if (clear_user((void __user *)hva, PAGE_SIZE) != 0) | 217 | if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) |
184 | return -EFAULT; | 218 | return -EFAULT; |
185 | kvm_s390_set_psw_cc(vcpu, 0); | 219 | kvm_s390_set_psw_cc(vcpu, 0); |
186 | vcpu->run->s.regs.gprs[0] = 0; | 220 | vcpu->run->s.regs.gprs[0] = 0; |
@@ -190,9 +224,12 @@ static int handle_test_block(struct kvm_vcpu *vcpu) | |||
190 | static int handle_tpi(struct kvm_vcpu *vcpu) | 224 | static int handle_tpi(struct kvm_vcpu *vcpu) |
191 | { | 225 | { |
192 | struct kvm_s390_interrupt_info *inti; | 226 | struct kvm_s390_interrupt_info *inti; |
227 | unsigned long len; | ||
228 | u32 tpi_data[3]; | ||
229 | int cc, rc; | ||
193 | u64 addr; | 230 | u64 addr; |
194 | int cc; | ||
195 | 231 | ||
232 | rc = 0; | ||
196 | addr = kvm_s390_get_base_disp_s(vcpu); | 233 | addr = kvm_s390_get_base_disp_s(vcpu); |
197 | if (addr & 3) | 234 | if (addr & 3) |
198 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 235 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -201,30 +238,41 @@ static int handle_tpi(struct kvm_vcpu *vcpu) | |||
201 | if (!inti) | 238 | if (!inti) |
202 | goto no_interrupt; | 239 | goto no_interrupt; |
203 | cc = 1; | 240 | cc = 1; |
241 | tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; | ||
242 | tpi_data[1] = inti->io.io_int_parm; | ||
243 | tpi_data[2] = inti->io.io_int_word; | ||
204 | if (addr) { | 244 | if (addr) { |
205 | /* | 245 | /* |
206 | * Store the two-word I/O interruption code into the | 246 | * Store the two-word I/O interruption code into the |
207 | * provided area. | 247 | * provided area. |
208 | */ | 248 | */ |
209 | if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr) | 249 | len = sizeof(tpi_data) - 4; |
210 | || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2)) | 250 | rc = write_guest(vcpu, addr, &tpi_data, len); |
211 | || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4))) | 251 | if (rc) |
212 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 252 | return kvm_s390_inject_prog_cond(vcpu, rc); |
213 | } else { | 253 | } else { |
214 | /* | 254 | /* |
215 | * Store the three-word I/O interruption code into | 255 | * Store the three-word I/O interruption code into |
216 | * the appropriate lowcore area. | 256 | * the appropriate lowcore area. |
217 | */ | 257 | */ |
218 | put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID); | 258 | len = sizeof(tpi_data); |
219 | put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR); | 259 | if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) |
220 | put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM); | 260 | rc = -EFAULT; |
221 | put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD); | ||
222 | } | 261 | } |
223 | kfree(inti); | 262 | /* |
263 | * If we encounter a problem storing the interruption code, the | ||
264 | * instruction is suppressed from the guest's view: reinject the | ||
265 | * interrupt. | ||
266 | */ | ||
267 | if (!rc) | ||
268 | kfree(inti); | ||
269 | else | ||
270 | kvm_s390_reinject_io_int(vcpu->kvm, inti); | ||
224 | no_interrupt: | 271 | no_interrupt: |
225 | /* Set condition code and we're done. */ | 272 | /* Set condition code and we're done. */ |
226 | kvm_s390_set_psw_cc(vcpu, cc); | 273 | if (!rc) |
227 | return 0; | 274 | kvm_s390_set_psw_cc(vcpu, cc); |
275 | return rc ? -EFAULT : 0; | ||
228 | } | 276 | } |
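
TPI stores a two-word interruption code (8 bytes) when an operand address is given, and the full three-word code (12 bytes) into the lowcore otherwise. The sketch below regroups the tpi_data[] handling from the function above to make the two cases explicit; all names are from this hunk:

    u32 tpi_data[3];
    tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
    tpi_data[1] = inti->io.io_int_parm;
    tpi_data[2] = inti->io.io_int_word;

    if (addr)   /* two words: subchannel id/nr and interruption parameter */
            rc = write_guest(vcpu, addr, &tpi_data, sizeof(tpi_data) - 4);
    else        /* three words, starting at __LC_SUBCHANNEL_ID in the lowcore */
            rc = write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data,
                                sizeof(tpi_data));
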
229 | 277 | ||
230 | static int handle_tsch(struct kvm_vcpu *vcpu) | 278 | static int handle_tsch(struct kvm_vcpu *vcpu) |
@@ -292,10 +340,10 @@ static int handle_stfl(struct kvm_vcpu *vcpu) | |||
292 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 340 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
293 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 341 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
294 | 342 | ||
295 | rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), | 343 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), |
296 | vfacilities, 4); | 344 | vfacilities, 4); |
297 | if (rc) | 345 | if (rc) |
298 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 346 | return rc; |
299 | VCPU_EVENT(vcpu, 5, "store facility list value %x", | 347 | VCPU_EVENT(vcpu, 5, "store facility list value %x", |
300 | *(unsigned int *) vfacilities); | 348 | *(unsigned int *) vfacilities); |
301 | trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); | 349 | trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); |
@@ -333,6 +381,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) | |||
333 | psw_t *gpsw = &vcpu->arch.sie_block->gpsw; | 381 | psw_t *gpsw = &vcpu->arch.sie_block->gpsw; |
334 | psw_compat_t new_psw; | 382 | psw_compat_t new_psw; |
335 | u64 addr; | 383 | u64 addr; |
384 | int rc; | ||
336 | 385 | ||
337 | if (gpsw->mask & PSW_MASK_PSTATE) | 386 | if (gpsw->mask & PSW_MASK_PSTATE) |
338 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 387 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
@@ -340,8 +389,10 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) | |||
340 | addr = kvm_s390_get_base_disp_s(vcpu); | 389 | addr = kvm_s390_get_base_disp_s(vcpu); |
341 | if (addr & 7) | 390 | if (addr & 7) |
342 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 391 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
343 | if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) | 392 | |
344 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 393 | rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); |
394 | if (rc) | ||
395 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
345 | if (!(new_psw.mask & PSW32_MASK_BASE)) | 396 | if (!(new_psw.mask & PSW32_MASK_BASE)) |
346 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 397 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
347 | gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; | 398 | gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; |
@@ -357,6 +408,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) | |||
357 | { | 408 | { |
358 | psw_t new_psw; | 409 | psw_t new_psw; |
359 | u64 addr; | 410 | u64 addr; |
411 | int rc; | ||
360 | 412 | ||
361 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 413 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
362 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 414 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
@@ -364,8 +416,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) | |||
364 | addr = kvm_s390_get_base_disp_s(vcpu); | 416 | addr = kvm_s390_get_base_disp_s(vcpu); |
365 | if (addr & 7) | 417 | if (addr & 7) |
366 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 418 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
367 | if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) | 419 | rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); |
368 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 420 | if (rc) |
421 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
369 | vcpu->arch.sie_block->gpsw = new_psw; | 422 | vcpu->arch.sie_block->gpsw = new_psw; |
370 | if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) | 423 | if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) |
371 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 424 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -375,7 +428,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) | |||
375 | 428 | ||
376 | static int handle_stidp(struct kvm_vcpu *vcpu) | 429 | static int handle_stidp(struct kvm_vcpu *vcpu) |
377 | { | 430 | { |
431 | u64 stidp_data = vcpu->arch.stidp_data; | ||
378 | u64 operand2; | 432 | u64 operand2; |
433 | int rc; | ||
379 | 434 | ||
380 | vcpu->stat.instruction_stidp++; | 435 | vcpu->stat.instruction_stidp++; |
381 | 436 | ||
@@ -387,8 +442,9 @@ static int handle_stidp(struct kvm_vcpu *vcpu) | |||
387 | if (operand2 & 7) | 442 | if (operand2 & 7) |
388 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 443 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
389 | 444 | ||
390 | if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2)) | 445 | rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); |
391 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 446 | if (rc) |
447 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
392 | 448 | ||
393 | VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); | 449 | VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); |
394 | return 0; | 450 | return 0; |
@@ -474,9 +530,10 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
474 | break; | 530 | break; |
475 | } | 531 | } |
476 | 532 | ||
477 | if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { | 533 | rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); |
478 | rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 534 | if (rc) { |
479 | goto out_exception; | 535 | rc = kvm_s390_inject_prog_cond(vcpu, rc); |
536 | goto out; | ||
480 | } | 537 | } |
481 | trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); | 538 | trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); |
482 | free_page(mem); | 539 | free_page(mem); |
@@ -485,7 +542,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
485 | return 0; | 542 | return 0; |
486 | out_no_data: | 543 | out_no_data: |
487 | kvm_s390_set_psw_cc(vcpu, 3); | 544 | kvm_s390_set_psw_cc(vcpu, 3); |
488 | out_exception: | 545 | out: |
489 | free_page(mem); | 546 | free_page(mem); |
490 | return rc; | 547 | return rc; |
491 | } | 548 | } |
@@ -496,6 +553,7 @@ static const intercept_handler_t b2_handlers[256] = { | |||
496 | [0x10] = handle_set_prefix, | 553 | [0x10] = handle_set_prefix, |
497 | [0x11] = handle_store_prefix, | 554 | [0x11] = handle_store_prefix, |
498 | [0x12] = handle_store_cpu_address, | 555 | [0x12] = handle_store_cpu_address, |
556 | [0x21] = handle_ipte_interlock, | ||
499 | [0x29] = handle_skey, | 557 | [0x29] = handle_skey, |
500 | [0x2a] = handle_skey, | 558 | [0x2a] = handle_skey, |
501 | [0x2b] = handle_skey, | 559 | [0x2b] = handle_skey, |
@@ -513,6 +571,7 @@ static const intercept_handler_t b2_handlers[256] = { | |||
513 | [0x3a] = handle_io_inst, | 571 | [0x3a] = handle_io_inst, |
514 | [0x3b] = handle_io_inst, | 572 | [0x3b] = handle_io_inst, |
515 | [0x3c] = handle_io_inst, | 573 | [0x3c] = handle_io_inst, |
574 | [0x50] = handle_ipte_interlock, | ||
516 | [0x5f] = handle_io_inst, | 575 | [0x5f] = handle_io_inst, |
517 | [0x74] = handle_io_inst, | 576 | [0x74] = handle_io_inst, |
518 | [0x76] = handle_io_inst, | 577 | [0x76] = handle_io_inst, |
@@ -618,6 +677,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
618 | } | 677 | } |
619 | 678 | ||
620 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { | 679 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { |
680 | __skey_check_enable(vcpu); | ||
621 | if (set_guest_storage_key(current->mm, useraddr, | 681 | if (set_guest_storage_key(current->mm, useraddr, |
622 | vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, | 682 | vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, |
623 | vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) | 683 | vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) |
@@ -642,7 +702,7 @@ static int handle_essa(struct kvm_vcpu *vcpu) | |||
642 | VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); | 702 | VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); |
643 | gmap = vcpu->arch.gmap; | 703 | gmap = vcpu->arch.gmap; |
644 | vcpu->stat.instruction_essa++; | 704 | vcpu->stat.instruction_essa++; |
645 | if (!kvm_enabled_cmma() || !vcpu->arch.sie_block->cbrlo) | 705 | if (!kvm_s390_cmma_enabled(vcpu->kvm)) |
646 | return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); | 706 | return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); |
647 | 707 | ||
648 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 708 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
@@ -672,7 +732,10 @@ static int handle_essa(struct kvm_vcpu *vcpu) | |||
672 | } | 732 | } |
673 | 733 | ||
674 | static const intercept_handler_t b9_handlers[256] = { | 734 | static const intercept_handler_t b9_handlers[256] = { |
735 | [0x8a] = handle_ipte_interlock, | ||
675 | [0x8d] = handle_epsw, | 736 | [0x8d] = handle_epsw, |
737 | [0x8e] = handle_ipte_interlock, | ||
738 | [0x8f] = handle_ipte_interlock, | ||
676 | [0xab] = handle_essa, | 739 | [0xab] = handle_essa, |
677 | [0xaf] = handle_pfmf, | 740 | [0xaf] = handle_pfmf, |
678 | }; | 741 | }; |
@@ -693,32 +756,67 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) | |||
693 | { | 756 | { |
694 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | 757 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; |
695 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | 758 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; |
696 | u64 useraddr; | ||
697 | u32 val = 0; | 759 | u32 val = 0; |
698 | int reg, rc; | 760 | int reg, rc; |
761 | u64 ga; | ||
699 | 762 | ||
700 | vcpu->stat.instruction_lctl++; | 763 | vcpu->stat.instruction_lctl++; |
701 | 764 | ||
702 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 765 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
703 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 766 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
704 | 767 | ||
705 | useraddr = kvm_s390_get_base_disp_rs(vcpu); | 768 | ga = kvm_s390_get_base_disp_rs(vcpu); |
706 | 769 | ||
707 | if (useraddr & 3) | 770 | if (ga & 3) |
708 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 771 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
709 | 772 | ||
710 | VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, | 773 | VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); |
711 | useraddr); | 774 | trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); |
712 | trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr); | ||
713 | 775 | ||
714 | reg = reg1; | 776 | reg = reg1; |
715 | do { | 777 | do { |
716 | rc = get_guest(vcpu, val, (u32 __user *) useraddr); | 778 | rc = read_guest(vcpu, ga, &val, sizeof(val)); |
717 | if (rc) | 779 | if (rc) |
718 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 780 | return kvm_s390_inject_prog_cond(vcpu, rc); |
719 | vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; | 781 | vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; |
720 | vcpu->arch.sie_block->gcr[reg] |= val; | 782 | vcpu->arch.sie_block->gcr[reg] |= val; |
721 | useraddr += 4; | 783 | ga += 4; |
784 | if (reg == reg3) | ||
785 | break; | ||
786 | reg = (reg + 1) % 16; | ||
787 | } while (1); | ||
788 | |||
789 | return 0; | ||
790 | } | ||
791 | |||
792 | int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) | ||
793 | { | ||
794 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | ||
795 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | ||
796 | u64 ga; | ||
797 | u32 val; | ||
798 | int reg, rc; | ||
799 | |||
800 | vcpu->stat.instruction_stctl++; | ||
801 | |||
802 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | ||
803 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | ||
804 | |||
805 | ga = kvm_s390_get_base_disp_rs(vcpu); | ||
806 | |||
807 | if (ga & 3) | ||
808 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | ||
809 | |||
810 | VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); | ||
811 | trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); | ||
812 | |||
813 | reg = reg1; | ||
814 | do { | ||
815 | val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful; | ||
816 | rc = write_guest(vcpu, ga, &val, sizeof(val)); | ||
817 | if (rc) | ||
818 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
819 | ga += 4; | ||
722 | if (reg == reg3) | 820 | if (reg == reg3) |
723 | break; | 821 | break; |
724 | reg = (reg + 1) % 16; | 822 | reg = (reg + 1) % 16; |
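
All four control register handlers (lctl, stctl, lctlg, stctg) share the same do/while: the register range wraps from 15 back to 0, as the architecture requires when reg3 < reg1. An illustrative sketch; process() is hypothetical, a real handler reads or writes gcr[reg] as above:

    /* E.g. reg1 = 14, reg3 = 1 visits registers 14, 15, 0, 1. */
    int reg = reg1;
    do {
            process(reg);           /* hypothetical per-register action */
            if (reg == reg3)
                    break;
            reg = (reg + 1) % 16;   /* wrap 15 -> 0 */
    } while (1);
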
@@ -731,7 +829,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
731 | { | 829 | { |
732 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | 830 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; |
733 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | 831 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; |
734 | u64 useraddr; | 832 | u64 ga, val; |
735 | int reg, rc; | 833 | int reg, rc; |
736 | 834 | ||
737 | vcpu->stat.instruction_lctlg++; | 835 | vcpu->stat.instruction_lctlg++; |
@@ -739,23 +837,58 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
739 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 837 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
740 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 838 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
741 | 839 | ||
742 | useraddr = kvm_s390_get_base_disp_rsy(vcpu); | 840 | ga = kvm_s390_get_base_disp_rsy(vcpu); |
743 | 841 | ||
744 | if (useraddr & 7) | 842 | if (ga & 7) |
745 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 843 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
746 | 844 | ||
747 | reg = reg1; | 845 | reg = reg1; |
748 | 846 | ||
749 | VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, | 847 | VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); |
750 | useraddr); | 848 | trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); |
751 | trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr); | ||
752 | 849 | ||
753 | do { | 850 | do { |
754 | rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg], | 851 | rc = read_guest(vcpu, ga, &val, sizeof(val)); |
755 | (u64 __user *) useraddr); | ||
756 | if (rc) | 852 | if (rc) |
757 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 853 | return kvm_s390_inject_prog_cond(vcpu, rc); |
758 | useraddr += 8; | 854 | vcpu->arch.sie_block->gcr[reg] = val; |
855 | ga += 8; | ||
856 | if (reg == reg3) | ||
857 | break; | ||
858 | reg = (reg + 1) % 16; | ||
859 | } while (1); | ||
860 | |||
861 | return 0; | ||
862 | } | ||
863 | |||
864 | static int handle_stctg(struct kvm_vcpu *vcpu) | ||
865 | { | ||
866 | int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; | ||
867 | int reg3 = vcpu->arch.sie_block->ipa & 0x000f; | ||
868 | u64 ga, val; | ||
869 | int reg, rc; | ||
870 | |||
871 | vcpu->stat.instruction_stctg++; | ||
872 | |||
873 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | ||
874 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | ||
875 | |||
876 | ga = kvm_s390_get_base_disp_rsy(vcpu); | ||
877 | |||
878 | if (ga & 7) | ||
879 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | ||
880 | |||
881 | reg = reg1; | ||
882 | |||
883 | VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); | ||
884 | trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); | ||
885 | |||
886 | do { | ||
887 | val = vcpu->arch.sie_block->gcr[reg]; | ||
888 | rc = write_guest(vcpu, ga, &val, sizeof(val)); | ||
889 | if (rc) | ||
890 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
891 | ga += 8; | ||
759 | if (reg == reg3) | 892 | if (reg == reg3) |
760 | break; | 893 | break; |
761 | reg = (reg + 1) % 16; | 894 | reg = (reg + 1) % 16; |
@@ -766,6 +899,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
766 | 899 | ||
767 | static const intercept_handler_t eb_handlers[256] = { | 900 | static const intercept_handler_t eb_handlers[256] = { |
768 | [0x2f] = handle_lctlg, | 901 | [0x2f] = handle_lctlg, |
902 | [0x25] = handle_stctg, | ||
769 | }; | 903 | }; |
770 | 904 | ||
771 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) | 905 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 26caeb530a78..c0b99e0f6b63 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -235,7 +235,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
235 | struct kvm_vcpu *dst_vcpu = NULL; | 235 | struct kvm_vcpu *dst_vcpu = NULL; |
236 | struct kvm_s390_interrupt_info *inti; | 236 | struct kvm_s390_interrupt_info *inti; |
237 | int rc; | 237 | int rc; |
238 | u8 tmp; | ||
239 | 238 | ||
240 | if (cpu_addr < KVM_MAX_VCPUS) | 239 | if (cpu_addr < KVM_MAX_VCPUS) |
241 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 240 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
@@ -243,10 +242,13 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
243 | return SIGP_CC_NOT_OPERATIONAL; | 242 | return SIGP_CC_NOT_OPERATIONAL; |
244 | li = &dst_vcpu->arch.local_int; | 243 | li = &dst_vcpu->arch.local_int; |
245 | 244 | ||
246 | /* make sure that the new value is valid memory */ | 245 | /* |
247 | address = address & 0x7fffe000u; | 246 | * Make sure the new value is valid memory. We only need to check the |
248 | if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || | 247 | * first page, since address is 8k aligned and memory pieces are always |
249 | copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { | 248 | * at least 1MB aligned and have at least a size of 1MB. |
249 | */ | ||
250 | address &= 0x7fffe000u; | ||
251 | if (kvm_is_error_gpa(vcpu->kvm, address)) { | ||
250 | *reg &= 0xffffffff00000000UL; | 252 | *reg &= 0xffffffff00000000UL; |
251 | *reg |= SIGP_STATUS_INVALID_PARAMETER; | 253 | *reg |= SIGP_STATUS_INVALID_PARAMETER; |
252 | return SIGP_CC_STATUS_STORED; | 254 | return SIGP_CC_STATUS_STORED; |
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h index e8e7213d4cc5..abf6ba52769e 100644 --- a/arch/s390/kvm/trace.h +++ b/arch/s390/kvm/trace.h | |||
@@ -30,6 +30,20 @@ | |||
30 | TP_printk("%02d[%016lx-%016lx]: " p_str, __entry->id, \ | 30 | TP_printk("%02d[%016lx-%016lx]: " p_str, __entry->id, \ |
31 | __entry->pswmask, __entry->pswaddr, p_args) | 31 | __entry->pswmask, __entry->pswaddr, p_args) |
32 | 32 | ||
33 | TRACE_EVENT(kvm_s390_skey_related_inst, | ||
34 | TP_PROTO(VCPU_PROTO_COMMON), | ||
35 | TP_ARGS(VCPU_ARGS_COMMON), | ||
36 | |||
37 | TP_STRUCT__entry( | ||
38 | VCPU_FIELD_COMMON | ||
39 | ), | ||
40 | |||
41 | TP_fast_assign( | ||
42 | VCPU_ASSIGN_COMMON | ||
43 | ), | ||
44 | VCPU_TP_PRINTK("%s", "first instruction related to skeys on vcpu") | ||
45 | ); | ||
46 | |||
33 | TRACE_EVENT(kvm_s390_major_guest_pfault, | 47 | TRACE_EVENT(kvm_s390_major_guest_pfault, |
34 | TP_PROTO(VCPU_PROTO_COMMON), | 48 | TP_PROTO(VCPU_PROTO_COMMON), |
35 | TP_ARGS(VCPU_ARGS_COMMON), | 49 | TP_ARGS(VCPU_ARGS_COMMON), |
@@ -301,6 +315,31 @@ TRACE_EVENT(kvm_s390_handle_lctl, | |||
301 | __entry->reg1, __entry->reg3, __entry->addr) | 315 | __entry->reg1, __entry->reg3, __entry->addr) |
302 | ); | 316 | ); |
303 | 317 | ||
318 | TRACE_EVENT(kvm_s390_handle_stctl, | ||
319 | TP_PROTO(VCPU_PROTO_COMMON, int g, int reg1, int reg3, u64 addr), | ||
320 | TP_ARGS(VCPU_ARGS_COMMON, g, reg1, reg3, addr), | ||
321 | |||
322 | TP_STRUCT__entry( | ||
323 | VCPU_FIELD_COMMON | ||
324 | __field(int, g) | ||
325 | __field(int, reg1) | ||
326 | __field(int, reg3) | ||
327 | __field(u64, addr) | ||
328 | ), | ||
329 | |||
330 | TP_fast_assign( | ||
331 | VCPU_ASSIGN_COMMON | ||
332 | __entry->g = g; | ||
333 | __entry->reg1 = reg1; | ||
334 | __entry->reg3 = reg3; | ||
335 | __entry->addr = addr; | ||
336 | ), | ||
337 | |||
338 | VCPU_TP_PRINTK("%s: storing cr %x-%x to %016llx", | ||
339 | __entry->g ? "stctg" : "stctl", | ||
340 | __entry->reg1, __entry->reg3, __entry->addr) | ||
341 | ); | ||
342 | |||
304 | TRACE_EVENT(kvm_s390_handle_prefix, | 343 | TRACE_EVENT(kvm_s390_handle_prefix, |
305 | TP_PROTO(VCPU_PROTO_COMMON, int set, u32 address), | 344 | TP_PROTO(VCPU_PROTO_COMMON, int set, u32 address), |
306 | TP_ARGS(VCPU_ARGS_COMMON, set, address), | 345 | TP_ARGS(VCPU_ARGS_COMMON, set, address), |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index d7cfd57815fb..ea4a31b95990 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -832,6 +832,7 @@ void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte) | |||
832 | } | 832 | } |
833 | spin_unlock(&gmap_notifier_lock); | 833 | spin_unlock(&gmap_notifier_lock); |
834 | } | 834 | } |
835 | EXPORT_SYMBOL_GPL(gmap_do_ipte_notify); | ||
835 | 836 | ||
836 | static inline int page_table_with_pgste(struct page *page) | 837 | static inline int page_table_with_pgste(struct page *page) |
837 | { | 838 | { |
@@ -864,8 +865,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | |||
864 | atomic_set(&page->_mapcount, 0); | 865 | atomic_set(&page->_mapcount, 0); |
865 | table = (unsigned long *) page_to_phys(page); | 866 | table = (unsigned long *) page_to_phys(page); |
866 | clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); | 867 | clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); |
867 | clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT, | 868 | clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); |
868 | PAGE_SIZE/2); | ||
869 | return table; | 869 | return table; |
870 | } | 870 | } |
871 | 871 | ||
@@ -883,8 +883,8 @@ static inline void page_table_free_pgste(unsigned long *table) | |||
883 | __free_page(page); | 883 | __free_page(page); |
884 | } | 884 | } |
885 | 885 | ||
886 | static inline unsigned long page_table_reset_pte(struct mm_struct *mm, | 886 | static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd, |
887 | pmd_t *pmd, unsigned long addr, unsigned long end) | 887 | unsigned long addr, unsigned long end, bool init_skey) |
888 | { | 888 | { |
889 | pte_t *start_pte, *pte; | 889 | pte_t *start_pte, *pte; |
890 | spinlock_t *ptl; | 890 | spinlock_t *ptl; |
@@ -895,6 +895,22 @@ static inline unsigned long page_table_reset_pte(struct mm_struct *mm, | |||
895 | do { | 895 | do { |
896 | pgste = pgste_get_lock(pte); | 896 | pgste = pgste_get_lock(pte); |
897 | pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; | 897 | pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; |
898 | if (init_skey) { | ||
899 | unsigned long address; | ||
900 | |||
901 | pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT | | ||
902 | PGSTE_GR_BIT | PGSTE_GC_BIT); | ||
903 | |||
904 | /* skip invalid and non-writable pages */ | ||
905 | if (pte_val(*pte) & _PAGE_INVALID || | ||
906 | !(pte_val(*pte) & _PAGE_WRITE)) { | ||
907 | pgste_set_unlock(pte, pgste); | ||
908 | continue; | ||
909 | } | ||
910 | |||
911 | address = pte_val(*pte) & PAGE_MASK; | ||
912 | page_set_storage_key(address, PAGE_DEFAULT_KEY, 1); | ||
913 | } | ||
898 | pgste_set_unlock(pte, pgste); | 914 | pgste_set_unlock(pte, pgste); |
899 | } while (pte++, addr += PAGE_SIZE, addr != end); | 915 | } while (pte++, addr += PAGE_SIZE, addr != end); |
900 | pte_unmap_unlock(start_pte, ptl); | 916 | pte_unmap_unlock(start_pte, ptl); |
@@ -902,8 +918,8 @@ static inline unsigned long page_table_reset_pte(struct mm_struct *mm, | |||
902 | return addr; | 918 | return addr; |
903 | } | 919 | } |
904 | 920 | ||
905 | static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, | 921 | static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud, |
906 | pud_t *pud, unsigned long addr, unsigned long end) | 922 | unsigned long addr, unsigned long end, bool init_skey) |
907 | { | 923 | { |
908 | unsigned long next; | 924 | unsigned long next; |
909 | pmd_t *pmd; | 925 | pmd_t *pmd; |
@@ -913,14 +929,14 @@ static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, | |||
913 | next = pmd_addr_end(addr, end); | 929 | next = pmd_addr_end(addr, end); |
914 | if (pmd_none_or_clear_bad(pmd)) | 930 | if (pmd_none_or_clear_bad(pmd)) |
915 | continue; | 931 | continue; |
916 | next = page_table_reset_pte(mm, pmd, addr, next); | 932 | next = page_table_reset_pte(mm, pmd, addr, next, init_skey); |
917 | } while (pmd++, addr = next, addr != end); | 933 | } while (pmd++, addr = next, addr != end); |
918 | 934 | ||
919 | return addr; | 935 | return addr; |
920 | } | 936 | } |
921 | 937 | ||
922 | static inline unsigned long page_table_reset_pud(struct mm_struct *mm, | 938 | static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd, |
923 | pgd_t *pgd, unsigned long addr, unsigned long end) | 939 | unsigned long addr, unsigned long end, bool init_skey) |
924 | { | 940 | { |
925 | unsigned long next; | 941 | unsigned long next; |
926 | pud_t *pud; | 942 | pud_t *pud; |
@@ -930,14 +946,14 @@ static inline unsigned long page_table_reset_pud(struct mm_struct *mm, | |||
930 | next = pud_addr_end(addr, end); | 946 | next = pud_addr_end(addr, end); |
931 | if (pud_none_or_clear_bad(pud)) | 947 | if (pud_none_or_clear_bad(pud)) |
932 | continue; | 948 | continue; |
933 | next = page_table_reset_pmd(mm, pud, addr, next); | 949 | next = page_table_reset_pmd(mm, pud, addr, next, init_skey); |
934 | } while (pud++, addr = next, addr != end); | 950 | } while (pud++, addr = next, addr != end); |
935 | 951 | ||
936 | return addr; | 952 | return addr; |
937 | } | 953 | } |
938 | 954 | ||
939 | void page_table_reset_pgste(struct mm_struct *mm, | 955 | void page_table_reset_pgste(struct mm_struct *mm, unsigned long start, |
940 | unsigned long start, unsigned long end) | 956 | unsigned long end, bool init_skey) |
941 | { | 957 | { |
942 | unsigned long addr, next; | 958 | unsigned long addr, next; |
943 | pgd_t *pgd; | 959 | pgd_t *pgd; |
@@ -949,7 +965,7 @@ void page_table_reset_pgste(struct mm_struct *mm, | |||
949 | next = pgd_addr_end(addr, end); | 965 | next = pgd_addr_end(addr, end); |
950 | if (pgd_none_or_clear_bad(pgd)) | 966 | if (pgd_none_or_clear_bad(pgd)) |
951 | continue; | 967 | continue; |
952 | next = page_table_reset_pud(mm, pgd, addr, next); | 968 | next = page_table_reset_pud(mm, pgd, addr, next, init_skey); |
953 | } while (pgd++, addr = next, addr != end); | 969 | } while (pgd++, addr = next, addr != end); |
954 | up_read(&mm->mmap_sem); | 970 | up_read(&mm->mmap_sem); |
955 | } | 971 | } |
@@ -989,7 +1005,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, | |||
989 | /* changing the guest storage key is considered a change of the page */ | 1005 | /* changing the guest storage key is considered a change of the page */ |
990 | if ((pgste_val(new) ^ pgste_val(old)) & | 1006 | if ((pgste_val(new) ^ pgste_val(old)) & |
991 | (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) | 1007 | (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) |
992 | pgste_val(new) |= PGSTE_HC_BIT; | 1008 | pgste_val(new) |= PGSTE_UC_BIT; |
993 | 1009 | ||
994 | pgste_set_unlock(ptep, new); | 1010 | pgste_set_unlock(ptep, new); |
995 | pte_unmap_unlock(*ptep, ptl); | 1011 | pte_unmap_unlock(*ptep, ptl); |
@@ -1011,6 +1027,11 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | |||
1011 | return NULL; | 1027 | return NULL; |
1012 | } | 1028 | } |
1013 | 1029 | ||
1030 | void page_table_reset_pgste(struct mm_struct *mm, unsigned long start, | ||
1031 | unsigned long end, bool init_skey) | ||
1032 | { | ||
1033 | } | ||
1034 | |||
1014 | static inline void page_table_free_pgste(unsigned long *table) | 1035 | static inline void page_table_free_pgste(unsigned long *table) |
1015 | { | 1036 | { |
1016 | } | 1037 | } |
@@ -1357,6 +1378,50 @@ int s390_enable_sie(void) | |||
1357 | } | 1378 | } |
1358 | EXPORT_SYMBOL_GPL(s390_enable_sie); | 1379 | EXPORT_SYMBOL_GPL(s390_enable_sie); |
1359 | 1380 | ||
1381 | /* | ||
1382 | * Enable storage key handling from now on and initialize the storage | ||
1383 | * keys with the default key. | ||
1384 | */ | ||
1385 | void s390_enable_skey(void) | ||
1386 | { | ||
1387 | /* | ||
1388 | * To avoid races between multiple vcpus which could otherwise call | ||
1389 | * page_table_reset_pgste() twice or more, the page_table_lock is | ||
1390 | * taken for serialization. | ||
1391 | */ | ||
1392 | spin_lock(¤t->mm->page_table_lock); | ||
1393 | if (mm_use_skey(current->mm)) { | ||
1394 | spin_unlock(¤t->mm->page_table_lock); | ||
1395 | return; | ||
1396 | } | ||
1397 | |||
1398 | current->mm->context.use_skey = 1; | ||
1399 | spin_unlock(¤t->mm->page_table_lock); | ||
1400 | page_table_reset_pgste(current->mm, 0, TASK_SIZE, true); | ||
1401 | } | ||
1402 | EXPORT_SYMBOL_GPL(s390_enable_skey); | ||
1403 | |||
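
Pulled together, the storage-key hunks in this patch form the following lazy-enablement path (a simplified summary; every name appears in the hunks above):

    /*
     * guest executes its first ISKE/SSKE/RRBE  ->  SIE intercept
     *   __skey_check_enable(vcpu)                          [priv.c]
     *     s390_enable_skey()                               [mm/pgtable.c]
     *       mm->context.use_skey = 1   (under page_table_lock)
     *       page_table_reset_pgste(mm, 0, TASK_SIZE, true)
     *         page_set_storage_key(..., PAGE_DEFAULT_KEY, 1)
     *           for each valid, writable pte
     *     ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE), so later key
     *     instructions run without intercepting
     */
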
1404 | /* | ||
1405 | * Test and clear the dirty state of a guest page | ||
1406 | */ | ||
1407 | bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) | ||
1408 | { | ||
1409 | pte_t *pte; | ||
1410 | spinlock_t *ptl; | ||
1411 | bool dirty = false; | ||
1412 | |||
1413 | pte = get_locked_pte(gmap->mm, address, &ptl); | ||
1414 | if (unlikely(!pte)) | ||
1415 | return false; | ||
1416 | |||
1417 | if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) | ||
1418 | dirty = true; | ||
1419 | |||
1420 | spin_unlock(ptl); | ||
1421 | return dirty; | ||
1422 | } | ||
1423 | EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty); | ||
1424 | |||
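
gmap_test_and_clear_dirty() is exported for KVM's dirty-log path; the fragment below is only an illustrative consumer (the real caller is outside this section), with mark_page_dirty() being the generic KVM primitive that sets a bit in the memslot's dirty bitmap:

    /* Hypothetical sweep over a guest address range. */
    unsigned long address;

    for (address = start; address < end; address += PAGE_SIZE)
            if (gmap_test_and_clear_dirty(address, gmap))
                    mark_page_dirty(kvm, address >> PAGE_SHIFT);
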
1360 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 1425 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1361 | int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, | 1426 | int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, |
1362 | pmd_t *pmdp) | 1427 | pmd_t *pmdp) |
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 14196ea0fdf3..b57fe0efb422 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c | |||
@@ -22,7 +22,8 @@ struct read_info_sccb { | |||
22 | u8 rnsize; /* 10 */ | 22 | u8 rnsize; /* 10 */ |
23 | u8 _reserved0[16 - 11]; /* 11-15 */ | 23 | u8 _reserved0[16 - 11]; /* 11-15 */ |
24 | u16 ncpurl; /* 16-17 */ | 24 | u16 ncpurl; /* 16-17 */ |
25 | u8 _reserved7[24 - 18]; /* 18-23 */ | 25 | u16 cpuoff; /* 18-19 */ |
26 | u8 _reserved7[24 - 20]; /* 20-23 */ | ||
26 | u8 loadparm[8]; /* 24-31 */ | 27 | u8 loadparm[8]; /* 24-31 */ |
27 | u8 _reserved1[48 - 32]; /* 32-47 */ | 28 | u8 _reserved1[48 - 32]; /* 32-47 */ |
28 | u64 facilities; /* 48-55 */ | 29 | u64 facilities; /* 48-55 */ |
@@ -45,6 +46,7 @@ static unsigned int sclp_con_has_linemode __initdata; | |||
45 | static unsigned long sclp_hsa_size; | 46 | static unsigned long sclp_hsa_size; |
46 | static unsigned int sclp_max_cpu; | 47 | static unsigned int sclp_max_cpu; |
47 | static struct sclp_ipl_info sclp_ipl_info; | 48 | static struct sclp_ipl_info sclp_ipl_info; |
49 | static unsigned char sclp_siif; | ||
48 | 50 | ||
49 | u64 sclp_facilities; | 51 | u64 sclp_facilities; |
50 | u8 sclp_fac84; | 52 | u8 sclp_fac84; |
@@ -96,6 +98,9 @@ static int __init sclp_read_info_early(struct read_info_sccb *sccb) | |||
96 | 98 | ||
97 | static void __init sclp_facilities_detect(struct read_info_sccb *sccb) | 99 | static void __init sclp_facilities_detect(struct read_info_sccb *sccb) |
98 | { | 100 | { |
101 | struct sclp_cpu_entry *cpue; | ||
102 | u16 boot_cpu_address, cpu; | ||
103 | |||
99 | if (sclp_read_info_early(sccb)) | 104 | if (sclp_read_info_early(sccb)) |
100 | return; | 105 | return; |
101 | 106 | ||
@@ -116,6 +121,15 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) | |||
116 | sclp_max_cpu = sccb->hcpua + 1; | 121 | sclp_max_cpu = sccb->hcpua + 1; |
117 | } | 122 | } |
118 | 123 | ||
124 | boot_cpu_address = stap(); | ||
125 | cpue = (void *)sccb + sccb->cpuoff; | ||
126 | for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) { | ||
127 | if (boot_cpu_address != cpue->address) | ||
128 | continue; | ||
129 | sclp_siif = cpue->siif; | ||
130 | break; | ||
131 | } | ||
132 | |||
119 | /* Save IPL information */ | 133 | /* Save IPL information */ |
120 | sclp_ipl_info.is_valid = 1; | 134 | sclp_ipl_info.is_valid = 1; |
121 | if (sccb->flags & 0x2) | 135 | if (sccb->flags & 0x2) |
@@ -148,6 +162,12 @@ unsigned int sclp_get_max_cpu(void) | |||
148 | return sclp_max_cpu; | 162 | return sclp_max_cpu; |
149 | } | 163 | } |
150 | 164 | ||
165 | int sclp_has_siif(void) | ||
166 | { | ||
167 | return sclp_siif; | ||
168 | } | ||
169 | EXPORT_SYMBOL(sclp_has_siif); | ||
170 | |||
151 | /* | 171 | /* |
152 | * This function will be called after sclp_facilities_detect(), which gets | 172 | * This function will be called after sclp_facilities_detect(), which gets |
153 | * called from early.c code. The sclp_facilities_detect() function retrieves | 173 | * called from early.c code. The sclp_facilities_detect() function retrieves |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 6c3c2eb96d06..32d263f683dc 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -880,6 +880,13 @@ static inline hpa_t pfn_to_hpa(pfn_t pfn) | |||
880 | return (hpa_t)pfn << PAGE_SHIFT; | 880 | return (hpa_t)pfn << PAGE_SHIFT; |
881 | } | 881 | } |
882 | 882 | ||
883 | static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) | ||
884 | { | ||
885 | unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); | ||
886 | |||
887 | return kvm_is_error_hva(hva); | ||
888 | } | ||
889 | |||
883 | static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) | 890 | static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) |
884 | { | 891 | { |
885 | set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); | 892 | set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); |
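
kvm_is_error_gpa() gives architectures a one-line existence check for a guest physical address; both prefix-validation call sites above follow the same pattern, repeated here as a standalone sketch:

    /* Reject a guest physical address not backed by any memslot. */
    if (kvm_is_error_gpa(vcpu->kvm, address))
            return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
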
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d8a6ce4c2a83..836e15b7abc8 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -745,6 +745,7 @@ struct kvm_ppc_smmu_info { | |||
745 | #define KVM_CAP_ENABLE_CAP_VM 98 | 745 | #define KVM_CAP_ENABLE_CAP_VM 98 |
746 | #define KVM_CAP_S390_IRQCHIP 99 | 746 | #define KVM_CAP_S390_IRQCHIP 99 |
747 | #define KVM_CAP_IOEVENTFD_NO_LENGTH 100 | 747 | #define KVM_CAP_IOEVENTFD_NO_LENGTH 100 |
748 | #define KVM_CAP_VM_ATTRIBUTES 101 | ||
748 | 749 | ||
749 | #ifdef KVM_CAP_IRQ_ROUTING | 750 | #ifdef KVM_CAP_IRQ_ROUTING |
750 | 751 | ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 96456ac888ba..ea46d64c8e75 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -637,14 +637,12 @@ static int kvm_vm_release(struct inode *inode, struct file *filp) | |||
637 | */ | 637 | */ |
638 | static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) | 638 | static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) |
639 | { | 639 | { |
640 | #ifndef CONFIG_S390 | ||
641 | unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); | 640 | unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); |
642 | 641 | ||
643 | memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); | 642 | memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); |
644 | if (!memslot->dirty_bitmap) | 643 | if (!memslot->dirty_bitmap) |
645 | return -ENOMEM; | 644 | return -ENOMEM; |
646 | 645 | ||
647 | #endif /* !CONFIG_S390 */ | ||
648 | return 0; | 646 | return 0; |
649 | } | 647 | } |
650 | 648 | ||
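
With s390 now able to populate dirty bitmaps via gmap_test_and_clear_dirty(), the CONFIG_S390 special case is gone; for clarity, the function as it reads after this patch:

    static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
    {
            unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

            memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
            if (!memslot->dirty_bitmap)
                    return -ENOMEM;

            return 0;
    }
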