aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/kvm
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2016-05-31 14:13:35 -0400
committerChristian Borntraeger <borntraeger@de.ibm.com>2016-06-10 06:07:15 -0400
commitcde0dcfb5df1dbcd90a8e73130a6b7091bdb493a (patch)
treee479b44497ee76a26da68eda649f69df3be711b9 /arch/s390/kvm
parentfbcb7d5157718645cc198c6be6b435ab326c1892 (diff)
KVM: s390: gaccess: convert guest_page_range()
Let's use our new function for preparing translation exceptions. As we will need the correct ar, let's pass that to guest_page_range(). This will also make sure that the guest address is stored in the tec for applicable excptions. Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--arch/s390/kvm/gaccess.c27
1 file changed, 9 insertions, 18 deletions
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index ae9f9e8e063c..ec6c91e85dbe 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -792,40 +792,31 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
792 return 1; 792 return 1;
793} 793}
794 794
795static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, 795static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
796 unsigned long *pages, unsigned long nr_pages, 796 unsigned long *pages, unsigned long nr_pages,
797 const union asce asce, enum gacc_mode mode) 797 const union asce asce, enum gacc_mode mode)
798{ 798{
799 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
800 psw_t *psw = &vcpu->arch.sie_block->gpsw; 799 psw_t *psw = &vcpu->arch.sie_block->gpsw;
801 struct trans_exc_code_bits *tec_bits; 800 int lap_enabled, rc = 0;
802 int lap_enabled, rc;
803 801
804 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
805 lap_enabled = low_address_protection_enabled(vcpu, asce); 802 lap_enabled = low_address_protection_enabled(vcpu, asce);
806 while (nr_pages) { 803 while (nr_pages) {
807 ga = kvm_s390_logical_to_effective(vcpu, ga); 804 ga = kvm_s390_logical_to_effective(vcpu, ga);
808 tec_bits->addr = ga >> PAGE_SHIFT; 805 if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
809 if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) { 806 return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
810 pgm->code = PGM_PROTECTION; 807 PROT_TYPE_LA);
811 return pgm->code;
812 }
813 ga &= PAGE_MASK; 808 ga &= PAGE_MASK;
814 if (psw_bits(*psw).t) { 809 if (psw_bits(*psw).t) {
815 rc = guest_translate(vcpu, ga, pages, asce, mode); 810 rc = guest_translate(vcpu, ga, pages, asce, mode);
816 if (rc < 0) 811 if (rc < 0)
817 return rc; 812 return rc;
818 if (rc == PGM_PROTECTION)
819 tec_bits->b61 = 1;
820 if (rc)
821 pgm->code = rc;
822 } else { 813 } else {
823 *pages = kvm_s390_real_to_abs(vcpu, ga); 814 *pages = kvm_s390_real_to_abs(vcpu, ga);
824 if (kvm_is_error_gpa(vcpu->kvm, *pages)) 815 if (kvm_is_error_gpa(vcpu->kvm, *pages))
825 pgm->code = PGM_ADDRESSING; 816 rc = PGM_ADDRESSING;
826 } 817 }
827 if (pgm->code) 818 if (rc)
828 return pgm->code; 819 return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_DAT);
829 ga += PAGE_SIZE; 820 ga += PAGE_SIZE;
830 pages++; 821 pages++;
831 nr_pages--; 822 nr_pages--;
@@ -859,7 +850,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
859 need_ipte_lock = psw_bits(*psw).t && !asce.r; 850 need_ipte_lock = psw_bits(*psw).t && !asce.r;
860 if (need_ipte_lock) 851 if (need_ipte_lock)
861 ipte_lock(vcpu); 852 ipte_lock(vcpu);
862 rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode); 853 rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
863 for (idx = 0; idx < nr_pages && !rc; idx++) { 854 for (idx = 0; idx < nr_pages && !rc; idx++) {
864 gpa = *(pages + idx) + (ga & ~PAGE_MASK); 855 gpa = *(pages + idx) + (ga & ~PAGE_MASK);
865 _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); 856 _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);