author		Heiko Carstens <heiko.carstens@de.ibm.com>	2014-01-01 10:26:52 -0500
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2014-04-22 07:24:38 -0400
commit		2293897805c2fea69e45aca31b3589d4590af89d (patch)
tree		21fc9661bc2936bca9ba3054f02613859c53d663
parent		d95fb12ff4d73e897126043bb5d03a068997a2ef (diff)
KVM: s390: add architecture compliant guest access functions
The new guest memory access functions write_guest() and read_guest() can be
used to access guest memory in an architecture compliant way. These functions
will look at the vcpu's PSW and select the correct address space for memory
access and also perform correct address wrap around. In case DAT is turned
on, page tables will be walked, otherwise access will happen to real or
absolute memory.

Any access exception will be recognized and exception data will be stored in
the vcpu's kvm_vcpu_arch.pgm member. Subsequently an exception can be
injected, if necessary.

Missing are:
- key protection checks
- access register mode support
- program event recording support

This patch also adds write_guest_real(), read_guest_real(),
write_guest_absolute() and read_guest_absolute() functions, which can be used
to access real and absolute storage. These functions currently do not perform
any access checks, since there is no use case (yet?).

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
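As a quick illustration of the intended use (not part of the patch itself):
an emulation helper that reads a guest operand, modifies it, and writes it
back. Only read_guest(), write_guest() and their return convention come from
this patch; the handler name and the byte-swap operation are made up for the
example.

/* Hypothetical example: byte-swap an 8-byte operand in guest memory. */
static int emulate_swap_u64(struct kvm_vcpu *vcpu, unsigned long ga)
{
	u64 val;
	int rc;

	/* Walks guest page tables if DAT is on; honors the PSW address space. */
	rc = read_guest(vcpu, ga, &val, sizeof(val));
	if (rc)		/* < 0: host error; > 0: program interruption code */
		return rc;
	val = swab64(val);	/* purely for illustration */
	return write_guest(vcpu, ga, &val, sizeof(val));
}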
-rw-r--r--	arch/s390/kvm/Makefile	|   4
-rw-r--r--	arch/s390/kvm/gaccess.c	| 536
-rw-r--r--	arch/s390/kvm/gaccess.h	| 170
3 files changed, 709 insertions(+), 1 deletion(-)
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index d3adb37e93a4..83a7a355befe 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -11,5 +11,7 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
-kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o
+kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
+kvm-objs += diag.o gaccess.o
+
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
new file mode 100644
index 000000000000..916e1ee1f8c9
--- /dev/null
+++ b/arch/s390/kvm/gaccess.c
@@ -0,0 +1,536 @@
+/*
+ * guest access functions
+ *
+ * Copyright IBM Corp. 2014
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+#include <asm/pgtable.h>
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+union asce {
+	unsigned long val;
+	struct {
+		unsigned long origin : 52; /* Region- or Segment-Table Origin */
+		unsigned long	 : 2;
+		unsigned long g  : 1; /* Subspace Group Control */
+		unsigned long p  : 1; /* Private Space Control */
+		unsigned long s  : 1; /* Storage-Alteration-Event Control */
+		unsigned long x  : 1; /* Space-Switch-Event Control */
+		unsigned long r  : 1; /* Real-Space Control */
+		unsigned long	 : 1;
+		unsigned long dt : 2; /* Designation-Type Control */
+		unsigned long tl : 2; /* Region- or Segment-Table Length */
+	};
+};
+
+enum {
+	ASCE_TYPE_SEGMENT = 0,
+	ASCE_TYPE_REGION3 = 1,
+	ASCE_TYPE_REGION2 = 2,
+	ASCE_TYPE_REGION1 = 3
+};
+
+union region1_table_entry {
+	unsigned long val;
+	struct {
+		unsigned long rto : 52; /* Region-Table Origin */
+		unsigned long	 : 2;
+		unsigned long p  : 1; /* DAT-Protection Bit */
+		unsigned long	 : 1;
+		unsigned long tf : 2; /* Region-Second-Table Offset */
+		unsigned long i  : 1; /* Region-Invalid Bit */
+		unsigned long	 : 1;
+		unsigned long tt : 2; /* Table-Type Bits */
+		unsigned long tl : 2; /* Region-Second-Table Length */
+	};
+};
+
+union region2_table_entry {
+	unsigned long val;
+	struct {
+		unsigned long rto : 52; /* Region-Table Origin */
+		unsigned long	 : 2;
+		unsigned long p  : 1; /* DAT-Protection Bit */
+		unsigned long	 : 1;
+		unsigned long tf : 2; /* Region-Third-Table Offset */
+		unsigned long i  : 1; /* Region-Invalid Bit */
+		unsigned long	 : 1;
+		unsigned long tt : 2; /* Table-Type Bits */
+		unsigned long tl : 2; /* Region-Third-Table Length */
+	};
+};
+
+struct region3_table_entry_fc0 {
+	unsigned long sto : 52; /* Segment-Table Origin */
+	unsigned long	 : 1;
+	unsigned long fc : 1; /* Format-Control */
+	unsigned long p  : 1; /* DAT-Protection Bit */
+	unsigned long	 : 1;
+	unsigned long tf : 2; /* Segment-Table Offset */
+	unsigned long i  : 1; /* Region-Invalid Bit */
+	unsigned long cr : 1; /* Common-Region Bit */
+	unsigned long tt : 2; /* Table-Type Bits */
+	unsigned long tl : 2; /* Segment-Table Length */
+};
+
+struct region3_table_entry_fc1 {
+	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
+	unsigned long	 : 14;
+	unsigned long av : 1; /* ACCF-Validity Control */
+	unsigned long acc : 4; /* Access-Control Bits */
+	unsigned long f  : 1; /* Fetch-Protection Bit */
+	unsigned long fc : 1; /* Format-Control */
+	unsigned long p  : 1; /* DAT-Protection Bit */
+	unsigned long co : 1; /* Change-Recording Override */
+	unsigned long	 : 2;
+	unsigned long i  : 1; /* Region-Invalid Bit */
+	unsigned long cr : 1; /* Common-Region Bit */
+	unsigned long tt : 2; /* Table-Type Bits */
+	unsigned long	 : 2;
+};
+
+union region3_table_entry {
+	unsigned long val;
+	struct region3_table_entry_fc0 fc0;
+	struct region3_table_entry_fc1 fc1;
+	struct {
+		unsigned long	 : 53;
+		unsigned long fc : 1; /* Format-Control */
+		unsigned long	 : 4;
+		unsigned long i  : 1; /* Region-Invalid Bit */
+		unsigned long cr : 1; /* Common-Region Bit */
+		unsigned long tt : 2; /* Table-Type Bits */
+		unsigned long	 : 2;
+	};
+};
+
+struct segment_entry_fc0 {
+	unsigned long pto : 53; /* Page-Table Origin */
+	unsigned long fc : 1; /* Format-Control */
+	unsigned long p  : 1; /* DAT-Protection Bit */
+	unsigned long	 : 3;
+	unsigned long i  : 1; /* Segment-Invalid Bit */
+	unsigned long cs : 1; /* Common-Segment Bit */
+	unsigned long tt : 2; /* Table-Type Bits */
+	unsigned long	 : 2;
+};
+
+struct segment_entry_fc1 {
+	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
+	unsigned long	 : 3;
+	unsigned long av : 1; /* ACCF-Validity Control */
+	unsigned long acc : 4; /* Access-Control Bits */
+	unsigned long f  : 1; /* Fetch-Protection Bit */
+	unsigned long fc : 1; /* Format-Control */
+	unsigned long p  : 1; /* DAT-Protection Bit */
+	unsigned long co : 1; /* Change-Recording Override */
+	unsigned long	 : 2;
+	unsigned long i  : 1; /* Segment-Invalid Bit */
+	unsigned long cs : 1; /* Common-Segment Bit */
+	unsigned long tt : 2; /* Table-Type Bits */
+	unsigned long	 : 2;
+};
+
+union segment_table_entry {
+	unsigned long val;
+	struct segment_entry_fc0 fc0;
+	struct segment_entry_fc1 fc1;
+	struct {
+		unsigned long	 : 53;
+		unsigned long fc : 1; /* Format-Control */
+		unsigned long	 : 4;
+		unsigned long i  : 1; /* Segment-Invalid Bit */
+		unsigned long cs : 1; /* Common-Segment Bit */
+		unsigned long tt : 2; /* Table-Type Bits */
+		unsigned long	 : 2;
+	};
+};
+
+enum {
+	TABLE_TYPE_SEGMENT = 0,
+	TABLE_TYPE_REGION3 = 1,
+	TABLE_TYPE_REGION2 = 2,
+	TABLE_TYPE_REGION1 = 3
+};
+
+union page_table_entry {
+	unsigned long val;
+	struct {
+		unsigned long pfra : 52; /* Page-Frame Real Address */
+		unsigned long z  : 1; /* Zero Bit */
+		unsigned long i  : 1; /* Page-Invalid Bit */
+		unsigned long p  : 1; /* DAT-Protection Bit */
+		unsigned long co : 1; /* Change-Recording Override */
+		unsigned long	 : 8;
+	};
+};
+
+/*
+ * vaddress union in order to easily decode a virtual address into its
+ * region first index, region second index etc. parts.
+ */
+union vaddress {
+	unsigned long addr;
+	struct {
+		unsigned long rfx : 11;
+		unsigned long rsx : 11;
+		unsigned long rtx : 11;
+		unsigned long sx  : 11;
+		unsigned long px  : 8;
+		unsigned long bx  : 12;
+	};
+	struct {
+		unsigned long rfx01 : 2;
+		unsigned long	    : 9;
+		unsigned long rsx01 : 2;
+		unsigned long	    : 9;
+		unsigned long rtx01 : 2;
+		unsigned long	    : 9;
+		unsigned long sx01  : 2;
+		unsigned long	    : 29;
+	};
+};
+
+/*
+ * raddress union which will contain the result (real or absolute address)
+ * after a page table walk. The rfaa, sfaa and pfra members are used to
+ * simply assign them the value of a region, segment or page table entry.
+ */
+union raddress {
+	unsigned long addr;
+	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
+	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
+	unsigned long pfra : 52; /* Page-Frame Real Address */
+};
+
+static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
+{
+	switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
+	case PSW_AS_PRIMARY:
+		return vcpu->arch.sie_block->gcr[1];
+	case PSW_AS_SECONDARY:
+		return vcpu->arch.sie_block->gcr[7];
+	case PSW_AS_HOME:
+		return vcpu->arch.sie_block->gcr[13];
+	}
+	return 0;
+}
+
+static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
+{
+	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
+}
+
+/**
+ * guest_translate - translate a guest virtual into a guest absolute address
+ * @vcpu: virtual cpu
+ * @gva: guest virtual address
+ * @gpa: points to where guest physical (absolute) address should be stored
+ * @write: indicates if access is a write access
+ *
+ * Translate a guest virtual address into a guest absolute address by means
+ * of dynamic address translation as specified by the architecture.
+ * If the resulting absolute address is not available in the configuration
+ * an addressing exception is indicated and @gpa will not be changed.
+ *
+ * Returns: - zero on success; @gpa contains the resulting absolute address
+ *          - a negative value if guest access failed due to e.g. broken
+ *            guest mapping
+ *          - a positive value if an access exception happened. In this case
+ *            the returned value is the program interruption code as defined
+ *            by the architecture
+ */
+static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
+				     unsigned long *gpa, int write)
+{
+	union vaddress vaddr = {.addr = gva};
+	union raddress raddr = {.addr = gva};
+	union page_table_entry pte;
+	int dat_protection = 0;
+	union ctlreg0 ctlreg0;
+	unsigned long ptr;
+	int edat1, edat2;
+	union asce asce;
+
+	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
+	edat1 = ctlreg0.edat && test_vfacility(8);
+	edat2 = edat1 && test_vfacility(78);
+	asce.val = get_vcpu_asce(vcpu);
+	if (asce.r)
+		goto real_address;
+	ptr = asce.origin * 4096;
+	switch (asce.dt) {
+	case ASCE_TYPE_REGION1:
+		if (vaddr.rfx01 > asce.tl)
+			return PGM_REGION_FIRST_TRANS;
+		ptr += vaddr.rfx * 8;
+		break;
+	case ASCE_TYPE_REGION2:
+		if (vaddr.rfx)
+			return PGM_ASCE_TYPE;
+		if (vaddr.rsx01 > asce.tl)
+			return PGM_REGION_SECOND_TRANS;
+		ptr += vaddr.rsx * 8;
+		break;
+	case ASCE_TYPE_REGION3:
+		if (vaddr.rfx || vaddr.rsx)
+			return PGM_ASCE_TYPE;
+		if (vaddr.rtx01 > asce.tl)
+			return PGM_REGION_THIRD_TRANS;
+		ptr += vaddr.rtx * 8;
+		break;
+	case ASCE_TYPE_SEGMENT:
+		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
+			return PGM_ASCE_TYPE;
+		if (vaddr.sx01 > asce.tl)
+			return PGM_SEGMENT_TRANSLATION;
+		ptr += vaddr.sx * 8;
+		break;
+	}
+	switch (asce.dt) {
+	case ASCE_TYPE_REGION1: {
+		union region1_table_entry rfte;
+
+		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+			return PGM_ADDRESSING;
+		if (deref_table(vcpu->kvm, ptr, &rfte.val))
+			return -EFAULT;
+		if (rfte.i)
+			return PGM_REGION_FIRST_TRANS;
+		if (rfte.tt != TABLE_TYPE_REGION1)
+			return PGM_TRANSLATION_SPEC;
+		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
+			return PGM_REGION_SECOND_TRANS;
+		if (edat1)
+			dat_protection |= rfte.p;
+		ptr = rfte.rto * 4096 + vaddr.rsx * 8;
+	}
+	/* fallthrough */
+	case ASCE_TYPE_REGION2: {
+		union region2_table_entry rste;
+
+		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+			return PGM_ADDRESSING;
+		if (deref_table(vcpu->kvm, ptr, &rste.val))
+			return -EFAULT;
+		if (rste.i)
+			return PGM_REGION_SECOND_TRANS;
+		if (rste.tt != TABLE_TYPE_REGION2)
+			return PGM_TRANSLATION_SPEC;
+		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
+			return PGM_REGION_THIRD_TRANS;
+		if (edat1)
+			dat_protection |= rste.p;
+		ptr = rste.rto * 4096 + vaddr.rtx * 8;
+	}
+	/* fallthrough */
+	case ASCE_TYPE_REGION3: {
+		union region3_table_entry rtte;
+
+		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+			return PGM_ADDRESSING;
+		if (deref_table(vcpu->kvm, ptr, &rtte.val))
+			return -EFAULT;
+		if (rtte.i)
+			return PGM_REGION_THIRD_TRANS;
+		if (rtte.tt != TABLE_TYPE_REGION3)
+			return PGM_TRANSLATION_SPEC;
+		if (rtte.cr && asce.p && edat2)
+			return PGM_TRANSLATION_SPEC;
+		if (rtte.fc && edat2) {
+			dat_protection |= rtte.fc1.p;
+			raddr.rfaa = rtte.fc1.rfaa;
+			goto absolute_address;
+		}
+		if (vaddr.sx01 < rtte.fc0.tf)
+			return PGM_SEGMENT_TRANSLATION;
+		if (vaddr.sx01 > rtte.fc0.tl)
+			return PGM_SEGMENT_TRANSLATION;
+		if (edat1)
+			dat_protection |= rtte.fc0.p;
+		ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
+	}
+	/* fallthrough */
+	case ASCE_TYPE_SEGMENT: {
+		union segment_table_entry ste;
+
+		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+			return PGM_ADDRESSING;
+		if (deref_table(vcpu->kvm, ptr, &ste.val))
+			return -EFAULT;
+		if (ste.i)
+			return PGM_SEGMENT_TRANSLATION;
+		if (ste.tt != TABLE_TYPE_SEGMENT)
+			return PGM_TRANSLATION_SPEC;
+		if (ste.cs && asce.p)
+			return PGM_TRANSLATION_SPEC;
+		if (ste.fc && edat1) {
+			dat_protection |= ste.fc1.p;
+			raddr.sfaa = ste.fc1.sfaa;
+			goto absolute_address;
+		}
+		dat_protection |= ste.fc0.p;
+		ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
+	}
+	}
+	if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		return PGM_ADDRESSING;
+	if (deref_table(vcpu->kvm, ptr, &pte.val))
+		return -EFAULT;
+	if (pte.i)
+		return PGM_PAGE_TRANSLATION;
+	if (pte.z)
+		return PGM_TRANSLATION_SPEC;
+	if (pte.co && !edat1)
+		return PGM_TRANSLATION_SPEC;
+	dat_protection |= pte.p;
+	raddr.pfra = pte.pfra;
+real_address:
+	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
+absolute_address:
+	if (write && dat_protection)
+		return PGM_PROTECTION;
+	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
+		return PGM_ADDRESSING;
+	*gpa = raddr.addr;
+	return 0;
+}
+
+static inline int is_low_address(unsigned long ga)
+{
+	/*
+	 * Check for address ranges 0..511 and 4096..4607: the mask keeps
+	 * only bits outside both ranges, so the result is zero iff @ga
+	 * lies in one of them.
+	 */
+	return (ga & ~0x11fful) == 0;
+}
+
+static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
+{
+	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	union asce asce;
+
+	if (!ctlreg0.lap)
+		return 0;
+	asce.val = get_vcpu_asce(vcpu);
+	if (psw_bits(*psw).t && asce.p)
+		return 0;
+	return 1;
+}
+
+struct trans_exc_code_bits {
+	unsigned long addr : 52; /* Translation-exception Address */
+	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
+	unsigned long	   : 7;
+	unsigned long b61  : 1;
+	unsigned long as   : 2;  /* ASCE Identifier */
+};
+
+enum {
+	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+	FSI_STORE   = 1, /* Exception was due to store operation */
+	FSI_FETCH   = 2  /* Exception was due to fetch operation */
+};
+
+static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
+			    unsigned long *pages, unsigned long nr_pages,
+			    int write)
+{
+	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	struct trans_exc_code_bits *tec_bits;
+	int lap_enabled, rc;
+
+	memset(pgm, 0, sizeof(*pgm));
+	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
+	tec_bits->as = psw_bits(*psw).as;
+	lap_enabled = low_address_protection_enabled(vcpu);
+	while (nr_pages) {
+		ga = kvm_s390_logical_to_effective(vcpu, ga);
+		tec_bits->addr = ga >> PAGE_SHIFT;
+		if (write && lap_enabled && is_low_address(ga)) {
+			pgm->code = PGM_PROTECTION;
+			return pgm->code;
+		}
+		ga &= PAGE_MASK;
+		if (psw_bits(*psw).t) {
+			rc = guest_translate(vcpu, ga, pages, write);
+			if (rc < 0)
+				return rc;
+			if (rc == PGM_PROTECTION)
+				tec_bits->b61 = 1;
+			if (rc)
+				pgm->code = rc;
+		} else {
+			*pages = kvm_s390_real_to_abs(vcpu, ga);
+			if (kvm_is_error_gpa(vcpu->kvm, *pages))
+				pgm->code = PGM_ADDRESSING;
+		}
+		if (pgm->code)
+			return pgm->code;
+		ga += PAGE_SIZE;
+		pages++;
+		nr_pages--;
+	}
+	return 0;
+}
+
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+		 unsigned long len, int write)
+{
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	unsigned long _len, nr_pages, gpa, idx;
+	unsigned long pages_array[2];
+	unsigned long *pages;
+	int rc;
+
+	if (!len)
+		return 0;
+	/* Access register mode is not supported yet. */
+	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
+		return -EOPNOTSUPP;
+	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
+	pages = pages_array;
+	if (nr_pages > ARRAY_SIZE(pages_array))
+		pages = vmalloc(nr_pages * sizeof(unsigned long));
+	if (!pages)
+		return -ENOMEM;
+	rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
+	for (idx = 0; idx < nr_pages && !rc; idx++) {
+		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
+		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
+		if (write)
+			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
+		else
+			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
+		len -= _len;
+		ga += _len;
+		data += _len;
+	}
+	if (nr_pages > ARRAY_SIZE(pages_array))
+		vfree(pages);
+	return rc;
+}
+
+int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
+		      void *data, unsigned long len, int write)
+{
+	unsigned long _len, gpa;
+	int rc = 0;
+
+	while (len && !rc) {
+		gpa = kvm_s390_real_to_abs(vcpu, gra);
+		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
+		if (write)
+			rc = write_guest_abs(vcpu, gpa, data, _len);
+		else
+			rc = read_guest_abs(vcpu, gpa, data, _len);
+		len -= _len;
+		gra += _len;
+		data += _len;
+	}
+	return rc;
+}
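The vaddress union above slices a 64-bit virtual address into region,
segment and page indices; its bit-field layout relies on the big-endian
s390 ABI. For illustration, a standalone sketch (not part of the patch) of
the same decomposition with explicit, endianness-independent shifts and
masks; the sample address is arbitrary:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x0000123456789abcUL;	/* arbitrary example */

	printf("rfx = %lu\n", (addr >> 53) & 0x7ff);	/* region-first index  */
	printf("rsx = %lu\n", (addr >> 42) & 0x7ff);	/* region-second index */
	printf("rtx = %lu\n", (addr >> 31) & 0x7ff);	/* region-third index  */
	printf("sx  = %lu\n", (addr >> 20) & 0x7ff);	/* segment index       */
	printf("px  = %lu\n", (addr >> 12) & 0xff);	/* page index          */
	printf("bx  = %lu\n", addr & 0xfff);		/* byte index          */
	return 0;
}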
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 917aeaa04fff..21ee62cd948e 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -227,4 +227,174 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 
 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
 }
+
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+		 unsigned long len, int write);
+
+int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
+		      void *data, unsigned long len, int write);
+
+/**
+ * write_guest - copy data from kernel space to guest space
+ * @vcpu: virtual cpu
+ * @ga: guest address
+ * @data: source address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @data (kernel space) to @ga (guest address).
+ * In order to copy data to guest space the PSW of the vcpu is inspected:
+ * If DAT is off data will be copied to guest real or absolute memory.
+ * If DAT is on data will be copied to the address space as specified by
+ * the address space bits of the PSW:
+ * Primary, secondary or home space (access register mode is currently not
+ * implemented).
+ * The addressing mode of the PSW is also inspected, so that address wrap
+ * around is taken into account for 24-, 31- and 64-bit addressing mode,
+ * if the to be copied data crosses page boundaries in guest address space.
+ * In addition also low address and DAT protection are inspected before
+ * copying any data (key protection is currently not implemented).
+ *
+ * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
+ * In case of an access exception (e.g. protection exception) pgm will contain
+ * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
+ * will inject a correct exception into the guest.
+ * If no access exception happened, the contents of pgm are undefined when
+ * this function returns.
+ *
+ * Returns: - zero on success
+ *          - a negative value if e.g. the guest mapping is broken or in
+ *            case of out-of-memory. In this case the contents of pgm are
+ *            undefined. Also parts of @data may have been copied to guest
+ *            space.
+ *          - a positive value if an access exception happened. In this case
+ *            the returned value is the program interruption code and the
+ *            contents of pgm may be used to inject an exception into the
+ *            guest. No data has been copied to guest space.
+ *
+ * Note: in case an access exception is recognized no data has been copied to
+ *       guest space (this is also true, if the to be copied data would cross
+ *       one or more page boundaries in guest space).
+ *       Therefore this function may be used for nullifying and suppressing
+ *       instruction emulation.
+ *       It may also be used for terminating instructions, if it is undefined
+ *       whether data has been changed in guest space in case of an exception.
+ */
+static inline __must_check
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+		unsigned long len)
+{
+	return access_guest(vcpu, ga, data, len, 1);
+}
+
+/**
+ * read_guest - copy data from guest space to kernel space
+ * @vcpu: virtual cpu
+ * @ga: guest address
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @ga (guest address) to @data (kernel space).
+ *
+ * The behaviour of read_guest is identical to write_guest, except that
+ * data will be copied from guest space to kernel space.
+ */
+static inline __must_check
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+	       unsigned long len)
+{
+	return access_guest(vcpu, ga, data, len, 0);
+}
+
+/**
+ * write_guest_abs - copy data from kernel space to guest space absolute
+ * @vcpu: virtual cpu
+ * @gpa: guest physical (absolute) address
+ * @data: source address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
+ * It is up to the caller to ensure that the entire guest memory range is
+ * valid memory before calling this function.
+ * Guest low address and key protection are not checked.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * If an error occurs data may have been copied partially to guest memory.
+ */
+static inline __must_check
+int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
+		    unsigned long len)
+{
+	return kvm_write_guest(vcpu->kvm, gpa, data, len);
+}
+
+/**
+ * read_guest_abs - copy data from guest space absolute to kernel space
+ * @vcpu: virtual cpu
+ * @gpa: guest physical (absolute) address
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
+ * It is up to the caller to ensure that the entire guest memory range is
+ * valid memory before calling this function.
+ * Guest key protection is not checked.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * If an error occurs data may have been copied partially to kernel space.
+ */
+static inline __must_check
+int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
+		   unsigned long len)
+{
+	return kvm_read_guest(vcpu->kvm, gpa, data, len);
+}
+
+/**
+ * write_guest_real - copy data from kernel space to guest space real
+ * @vcpu: virtual cpu
+ * @gra: guest real address
+ * @data: source address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @data (kernel space) to @gra (guest real address).
+ * It is up to the caller to ensure that the entire guest memory range is
+ * valid memory before calling this function.
+ * Guest low address and key protection are not checked.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * If an error occurs data may have been copied partially to guest memory.
+ */
+static inline __must_check
+int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
+		     unsigned long len)
+{
+	return access_guest_real(vcpu, gra, data, len, 1);
+}
+
+/**
+ * read_guest_real - copy data from guest space real to kernel space
+ * @vcpu: virtual cpu
+ * @gra: guest real address
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @gra (guest real address) to @data (kernel space).
+ * It is up to the caller to ensure that the entire guest memory range is
+ * valid memory before calling this function.
+ * Guest key protection is not checked.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * If an error occurs data may have been copied partially to kernel space.
+ */
+static inline __must_check
+int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
+		    unsigned long len)
+{
+	return access_guest_real(vcpu, gra, data, len, 0);
+}
+
 #endif /* __KVM_S390_GACCESS_H */
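Finally, a sketch of the error-handling pattern the write_guest() comment
above implies. The injection helper name is the one that comment mentions;
treating its signature as shown is an assumption for illustration, not part
of this patch:

/*
 * Hypothetical caller: store a value into guest memory and turn a
 * positive return code into an injected program exception. The exact
 * signature of kvm_s390_inject_prog_vcpu() is assumed here.
 */
static int store_to_guest(struct kvm_vcpu *vcpu, unsigned long ga, u64 val)
{
	int rc;

	rc = write_guest(vcpu, ga, &val, sizeof(val));
	if (rc < 0)	/* host-side failure, e.g. -EFAULT or -ENOMEM */
		return rc;
	if (rc > 0)	/* access exception: vcpu->arch.pgm holds the details */
		return kvm_s390_inject_prog_vcpu(vcpu, &vcpu->arch.pgm);
	return 0;
}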