author	Alexander Yarygin <yarygin@linux.vnet.ibm.com>	2015-01-22 04:44:11 -0500
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2015-03-17 11:25:31 -0400
commit	75a1812230ad7ad16e5a06b5ef2220f765b12da5 (patch)
tree	8272f8ea8d732d45f6eca9d403aebec415c2391d
parent	8ae04b8f500b9f46652c63431bf658223d875597 (diff)
KVM: s390: Optimize paths where get_vcpu_asce() is invoked
During dynamic address translation the get_vcpu_asce() function can be
invoked several times. That is fine for the usual modes, but will be slow
if CPUs are in AR mode. Let's call get_vcpu_asce() once and pass the
result to the called functions.

Signed-off-by: Alexander Yarygin <yarygin@linux.vnet.ibm.com>
Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
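The pattern the patch applies is a general one: hoist a value that is
expensive to derive out of the callees, compute it once in the caller, and
pass it down as a parameter. The following minimal C sketch illustrates the
before/after shape of the change under assumed names (struct ctx,
lookup_asce, translate_one_*) -- it is not the kernel code itself, and the
values are placeholders.

/* Minimal sketch of the hoisting pattern; all names are hypothetical. */
#include <stdio.h>

struct ctx { int ar_mode; };

/* Stand-in for get_vcpu_asce(); assume it is expensive in AR mode. */
static unsigned long lookup_asce(const struct ctx *c)
{
	return c->ar_mode ? 0x2000UL : 0x1000UL; /* placeholder values */
}

/* Before: every callee re-derives the asce on each invocation. */
static int translate_one_old(const struct ctx *c, unsigned long addr)
{
	unsigned long asce = lookup_asce(c); /* repeated work per page */
	return (addr & asce) != 0;
}

/* After: the caller computes the asce once and passes it down. */
static int translate_one_new(unsigned long asce, unsigned long addr)
{
	return (addr & asce) != 0;
}

int main(void)
{
	struct ctx c = { .ar_mode = 1 };
	unsigned long asce = lookup_asce(&c); /* hoisted out of the loop */

	for (unsigned long a = 0; a < 4; a++)
		printf("%d %d\n", translate_one_old(&c, a),
		       translate_one_new(asce, a));
	return 0;
}

In the patch below, guest_translate(), low_address_protection_enabled(),
and guest_page_range() play the role of the callees: each gains a
"const union asce asce" parameter and drops its own get_vcpu_asce() call.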
 arch/s390/kvm/gaccess.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 494131eda8c1..c74462a12c6d 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -330,6 +330,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * @vcpu: virtual cpu
  * @gva: guest virtual address
  * @gpa: points to where guest physical (absolute) address should be stored
+ * @asce: effective asce
  * @write: indicates if access is a write access
  *
  * Translate a guest virtual address into a guest absolute address by means
@@ -345,7 +346,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * by the architecture
  */
 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
-				     unsigned long *gpa, int write)
+				     unsigned long *gpa, const union asce asce,
+				     int write)
 {
 	union vaddress vaddr = {.addr = gva};
 	union raddress raddr = {.addr = gva};
@@ -354,12 +356,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	union ctlreg0 ctlreg0;
 	unsigned long ptr;
 	int edat1, edat2;
-	union asce asce;
 
 	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
 	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
 	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
-	asce.val = get_vcpu_asce(vcpu);
 	if (asce.r)
 		goto real_address;
 	ptr = asce.origin * 4096;
@@ -506,15 +506,14 @@ static inline int is_low_address(unsigned long ga)
 	return (ga & ~0x11fful) == 0;
 }
 
-static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
+static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
+					  const union asce asce)
 {
 	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
-	union asce asce;
 
 	if (!ctlreg0.lap)
 		return 0;
-	asce.val = get_vcpu_asce(vcpu);
 	if (psw_bits(*psw).t && asce.p)
 		return 0;
 	return 1;
@@ -536,7 +535,7 @@ enum {
 
 static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 			    unsigned long *pages, unsigned long nr_pages,
-			    int write)
+			    const union asce asce, int write)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -547,7 +546,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
 	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
 	tec_bits->as = psw_bits(*psw).as;
-	lap_enabled = low_address_protection_enabled(vcpu);
+	lap_enabled = low_address_protection_enabled(vcpu, asce);
 	while (nr_pages) {
 		ga = kvm_s390_logical_to_effective(vcpu, ga);
 		tec_bits->addr = ga >> PAGE_SHIFT;
@@ -557,7 +556,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 	}
 	ga &= PAGE_MASK;
 	if (psw_bits(*psw).t) {
-		rc = guest_translate(vcpu, ga, pages, write);
+		rc = guest_translate(vcpu, ga, pages, asce, write);
 		if (rc < 0)
 			return rc;
 		if (rc == PGM_PROTECTION)
@@ -604,7 +603,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 	need_ipte_lock = psw_bits(*psw).t && !asce.r;
 	if (need_ipte_lock)
 		ipte_lock(vcpu);
-	rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
+	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
 	for (idx = 0; idx < nr_pages && !rc; idx++) {
 		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
 		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
@@ -671,16 +670,16 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 	tec->as = psw_bits(*psw).as;
 	tec->fsi = write ? FSI_STORE : FSI_FETCH;
 	tec->addr = gva >> PAGE_SHIFT;
-	if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
+	asce.val = get_vcpu_asce(vcpu);
+	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
 		if (write) {
 			rc = pgm->code = PGM_PROTECTION;
 			return rc;
 		}
 	}
 
-	asce.val = get_vcpu_asce(vcpu);
 	if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
-		rc = guest_translate(vcpu, gva, gpa, write);
+		rc = guest_translate(vcpu, gva, gpa, asce, write);
 		if (rc > 0) {
 			if (rc == PGM_PROTECTION)
 				tec->b61 = 1;