author    Alexander Yarygin <yarygin@linux.vnet.ibm.com>    2015-03-09 07:17:25 -0400
committer Christian Borntraeger <borntraeger@de.ibm.com>    2015-03-17 11:25:57 -0400
commit    664b4973537068402954bee6e2959b858f263a6f (patch)
tree      53ad45b61cd6a279d0c920dd807dc43fd81e8f2d
parent    75a1812230ad7ad16e5a06b5ef2220f765b12da5 (diff)
KVM: s390: Add access register mode
Access register mode is one of the modes that control dynamic address
translation. In this mode the address space is specified by values of
the access registers. The effective address-space-control element is
obtained from the result of access-register translation. See the
"Access-Register Introduction" section of chapter 5 "Program Execution"
in "Principles of Operation" for more details.

Signed-off-by: Alexander Yarygin <yarygin@linux.vnet.ibm.com>
Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
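As background for the structures this patch introduces, the following
stand-alone C sketch (illustration only, not kernel code) shows how an
access-list-entry token (ALET) held in an access register is decoded,
and how the special values 0 and 1 select the primary (CR1) or
secondary (CR7) ASCE without a full ART walk. The kernel code below
expresses this layout with bitfields, which is unambiguous on
big-endian s390; the sketch uses explicit shifts so it behaves the
same on any host. The sample ALET value is fabricated.

/* Illustration only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define ALET_RESERVED(a) ((a) >> 25)           /* bits 0-6, must be 0 */
#define ALET_P(a)        (((a) >> 24) & 0x1)   /* bit 7: which ALD    */
#define ALET_ALESN(a)    (((a) >> 16) & 0xffu) /* bits 8-15: seq. no. */
#define ALET_ALEN(a)     ((a) & 0xffffu)       /* bits 16-31: index   */

int main(void)
{
	uint32_t alet = 0x00020005; /* fabricated: ALESN = 2, ALEN = 5 */

	if (alet == 0)
		puts("ALET 0 -> primary ASCE (CR1)");
	else if (alet == 1)
		puts("ALET 1 -> secondary ASCE (CR7)");
	else if (ALET_RESERVED(alet))
		puts("PGM_ALET_SPECIFICATION");
	else
		printf("P=%u ALESN=%u ALEN=%u -> full ART walk\n",
		       ALET_P(alet), ALET_ALESN(alet), ALET_ALEN(alet));
	return 0;
}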
 arch/s390/kvm/gaccess.c (-rw-r--r--) | 234
 arch/s390/kvm/gaccess.h (-rw-r--r--) |   3
 2 files changed, 202 insertions(+), 35 deletions(-)
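The core of the diff below is the new ar_translation(), which walks
the access-list structures in guest real memory. The following
stand-alone sketch (illustration only; all values fabricated) mirrors
its bounds check and address arithmetic on the effective access-list
designation (ALD): the ALO field locates the list in units of 128
bytes, the ALL field gives its length in units of 8 entries, and each
access-list entry (ALE) is 16 bytes.

/* Illustration only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t alo  = 0x1000; /* access-list origin (x 128 bytes) */
	uint32_t all  = 3;      /* access-list length (x 8 entries) */
	uint32_t alen = 20;     /* ALEN field taken from the ALET   */

	if (alen / 8 > all) {   /* list holds (all + 1) * 8 entries */
		puts("PGM_ALEN_TRANSLATION: ALEN past end of list");
		return 1;
	}
	uint64_t ale_addr = (uint64_t)alo * 128 + (uint64_t)alen * 16;
	printf("ALE for ALEN %u at guest real address 0x%llx\n",
	       alen, (unsigned long long)ale_addr);
	return 0;
}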
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index c74462a12c6d..ea38d716e24d 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -10,6 +10,7 @@
 #include <asm/pgtable.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
+#include <asm/switch_to.h>
 
 union asce {
 	unsigned long val;
@@ -207,6 +208,54 @@ union raddress {
 	unsigned long pfra : 52; /* Page-Frame Real Address */
 };
 
+union alet {
+	u32 val;
+	struct {
+		u32 reserved : 7;
+		u32 p        : 1;
+		u32 alesn    : 8;
+		u32 alen     : 16;
+	};
+};
+
+union ald {
+	u32 val;
+	struct {
+		u32     : 1;
+		u32 alo : 24;
+		u32 all : 7;
+	};
+};
+
+struct ale {
+	unsigned long i      : 1;  /* ALEN-Invalid Bit */
+	unsigned long        : 5;
+	unsigned long fo     : 1;  /* Fetch-Only Bit */
+	unsigned long p      : 1;  /* Private Bit */
+	unsigned long alesn  : 8;  /* Access-List-Entry Sequence Number */
+	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
+	unsigned long        : 32;
+	unsigned long        : 1;
+	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
+	unsigned long        : 6;
+	unsigned long astesn : 32; /* ASTE Sequence Number */
+} __packed;
+
+struct aste {
+	unsigned long i      : 1;  /* ASX-Invalid Bit */
+	unsigned long ato    : 29; /* Authority-Table Origin */
+	unsigned long        : 1;
+	unsigned long b      : 1;  /* Base-Space Bit */
+	unsigned long ax     : 16; /* Authorization Index */
+	unsigned long atl    : 12; /* Authority-Table Length */
+	unsigned long        : 2;
+	unsigned long ca     : 1;  /* Controlled-ASN Bit */
+	unsigned long ra     : 1;  /* Reusable-ASN Bit */
+	unsigned long asce   : 64; /* Address-Space-Control Element */
+	unsigned long ald    : 32;
+	unsigned long astesn : 32;
+	/* .. more fields there */
+} __packed;
 
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
@@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 	ipte_unlock_simple(vcpu);
 }
 
-static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+			  int write)
+{
+	union alet alet;
+	struct ale ale;
+	struct aste aste;
+	unsigned long ald_addr, authority_table_addr;
+	union ald ald;
+	int eax, rc;
+	u8 authority_table;
+
+	if (ar >= NUM_ACRS)
+		return -EINVAL;
+
+	save_access_regs(vcpu->run->s.regs.acrs);
+	alet.val = vcpu->run->s.regs.acrs[ar];
+
+	if (ar == 0 || alet.val == 0) {
+		asce->val = vcpu->arch.sie_block->gcr[1];
+		return 0;
+	} else if (alet.val == 1) {
+		asce->val = vcpu->arch.sie_block->gcr[7];
+		return 0;
+	}
+
+	if (alet.reserved)
+		return PGM_ALET_SPECIFICATION;
+
+	if (alet.p)
+		ald_addr = vcpu->arch.sie_block->gcr[5];
+	else
+		ald_addr = vcpu->arch.sie_block->gcr[2];
+	ald_addr &= 0x7fffffc0;
+
+	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
+	if (rc)
+		return rc;
+
+	if (alet.alen / 8 > ald.all)
+		return PGM_ALEN_TRANSLATION;
+
+	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
+		return PGM_ADDRESSING;
+
+	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
+			     sizeof(struct ale));
+	if (rc)
+		return rc;
+
+	if (ale.i == 1)
+		return PGM_ALEN_TRANSLATION;
+	if (ale.alesn != alet.alesn)
+		return PGM_ALE_SEQUENCE;
+
+	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
+	if (rc)
+		return rc;
+
+	if (aste.i)
+		return PGM_ASTE_VALIDITY;
+	if (aste.astesn != ale.astesn)
+		return PGM_ASTE_SEQUENCE;
+
+	if (ale.p == 1) {
+		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
+		if (ale.aleax != eax) {
+			if (eax / 16 > aste.atl)
+				return PGM_EXTENDED_AUTHORITY;
+
+			authority_table_addr = aste.ato * 4 + eax / 4;
+
+			rc = read_guest_real(vcpu, authority_table_addr,
+					     &authority_table,
+					     sizeof(u8));
+			if (rc)
+				return rc;
+
+			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
+				return PGM_EXTENDED_AUTHORITY;
+		}
+	}
+
+	if (ale.fo == 1 && write)
+		return PGM_PROTECTION;
+
+	asce->val = aste.asce;
+	return 0;
+}
+
+struct trans_exc_code_bits {
+	unsigned long addr : 52; /* Translation-exception Address */
+	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
+	unsigned long      : 6;
+	unsigned long b60  : 1;
+	unsigned long b61  : 1;
+	unsigned long as   : 2;  /* ASCE Identifier */
+};
+
+enum {
+	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+	FSI_STORE   = 1, /* Exception was due to store operation */
+	FSI_FETCH   = 2  /* Exception was due to fetch operation */
+};
+
+static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
+			 ar_t ar, int write)
 {
+	int rc;
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+	struct trans_exc_code_bits *tec_bits;
+
+	memset(pgm, 0, sizeof(*pgm));
+	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
+	tec_bits->as = psw_bits(*psw).as;
+
+	if (!psw_bits(*psw).t) {
+		asce->val = 0;
+		asce->r = 1;
+		return 0;
+	}
+
 	switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
 	case PSW_AS_PRIMARY:
-		return vcpu->arch.sie_block->gcr[1];
+		asce->val = vcpu->arch.sie_block->gcr[1];
+		return 0;
 	case PSW_AS_SECONDARY:
-		return vcpu->arch.sie_block->gcr[7];
+		asce->val = vcpu->arch.sie_block->gcr[7];
+		return 0;
 	case PSW_AS_HOME:
-		return vcpu->arch.sie_block->gcr[13];
+		asce->val = vcpu->arch.sie_block->gcr[13];
+		return 0;
+	case PSW_AS_ACCREG:
+		rc = ar_translation(vcpu, asce, ar, write);
+		switch (rc) {
+		case PGM_ALEN_TRANSLATION:
+		case PGM_ALE_SEQUENCE:
+		case PGM_ASTE_VALIDITY:
+		case PGM_ASTE_SEQUENCE:
+		case PGM_EXTENDED_AUTHORITY:
+			vcpu->arch.pgm.exc_access_id = ar;
+			break;
+		case PGM_PROTECTION:
+			tec_bits->b60 = 1;
+			tec_bits->b61 = 1;
+			break;
+		}
+		if (rc > 0)
+			pgm->code = rc;
+		return rc;
 	}
 	return 0;
 }
@@ -519,20 +710,6 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-struct trans_exc_code_bits {
-	unsigned long addr : 52; /* Translation-exception Address */
-	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
-	unsigned long      : 7;
-	unsigned long b61  : 1;
-	unsigned long as   : 2;  /* ASCE Identifier */
-};
-
-enum {
-	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
-	FSI_STORE   = 1, /* Exception was due to store operation */
-	FSI_FETCH   = 2  /* Exception was due to fetch operation */
-};
-
 static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 			    unsigned long *pages, unsigned long nr_pages,
 			    const union asce asce, int write)
@@ -542,10 +719,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 	struct trans_exc_code_bits *tec_bits;
 	int lap_enabled, rc;
 
-	memset(pgm, 0, sizeof(*pgm));
 	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
-	tec_bits->as = psw_bits(*psw).as;
 	lap_enabled = low_address_protection_enabled(vcpu, asce);
 	while (nr_pages) {
 		ga = kvm_s390_logical_to_effective(vcpu, ga);
@@ -590,16 +764,15 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 
 	if (!len)
 		return 0;
-	/* Access register mode is not supported yet. */
-	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
-		return -EOPNOTSUPP;
+	rc = get_vcpu_asce(vcpu, &asce, ar, write);
+	if (rc)
+		return rc;
 	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
 	pages = pages_array;
 	if (nr_pages > ARRAY_SIZE(pages_array))
 		pages = vmalloc(nr_pages * sizeof(unsigned long));
 	if (!pages)
 		return -ENOMEM;
-	asce.val = get_vcpu_asce(vcpu);
 	need_ipte_lock = psw_bits(*psw).t && !asce.r;
 	if (need_ipte_lock)
 		ipte_lock(vcpu);
@@ -660,17 +833,12 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 	union asce asce;
 	int rc;
 
-	/* Access register mode is not supported yet. */
-	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
-		return -EOPNOTSUPP;
-
 	gva = kvm_s390_logical_to_effective(vcpu, gva);
-	memset(pgm, 0, sizeof(*pgm));
 	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-	tec->as = psw_bits(*psw).as;
-	tec->fsi = write ? FSI_STORE : FSI_FETCH;
+	rc = get_vcpu_asce(vcpu, &asce, ar, write);
 	tec->addr = gva >> PAGE_SHIFT;
-	asce.val = get_vcpu_asce(vcpu);
+	if (rc)
+		return rc;
 	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
 		if (write) {
 			rc = pgm->code = PGM_PROTECTION;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 7c2866bfa63f..835e557dabf4 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -177,8 +177,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * If DAT is off data will be copied to guest real or absolute memory.
  * If DAT is on data will be copied to the address space as specified by
  * the address space bits of the PSW:
- * Primary, secondory or home space (access register mode is currently not
- * implemented).
+ * Primary, secondary, home space or access register mode.
  * The addressing mode of the PSW is also inspected, so that address wrap
  * around is taken into account for 24-, 31- and 64-bit addressing mode,
  * if the to be copied data crosses page boundaries in guest address space.
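
To make the bit twiddling in the extended-authority check of
ar_translation() above concrete, here is a stand-alone sketch
(illustration only, not kernel code; the table contents are
fabricated). Each byte of the authority table packs four 2-bit
entries (a P and an S bit, most significant bit first), the byte
index for an extended authorization index (EAX) is eax / 4, and the
mask 0x40 >> ((eax & 3) * 2) selects the secondary-authority bit of
the matching entry.

/* Illustration only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

static int secondary_authorized(const uint8_t *at, unsigned int eax)
{
	/* same mask expression the patch uses on the fetched byte */
	return (at[eax / 4] & (0x40 >> ((eax & 3) * 2))) != 0;
}

int main(void)
{
	uint8_t table[] = { 0x44 }; /* S bit set for entries 0 and 2 */
	unsigned int eax;

	for (eax = 0; eax < 4; eax++)
		printf("EAX %u: %s\n", eax,
		       secondary_authorized(table, eax) ?
		       "authorized" : "PGM_EXTENDED_AUTHORITY");
	return 0;
}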