author	Alexander Yarygin <yarygin@linux.vnet.ibm.com>	2015-03-03 06:26:14 -0500
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2015-03-17 11:24:38 -0400
commit	dd9e5b7bdba3250c075a212ff632d31edfa91ae7 (patch)
tree	06666c57566175f67091bf509c36c678fa8e82a6 /arch/s390/kvm
parent	40f5b735e867b8fd3e6090f5a184950c68d227bb (diff)
KVM: s390: Fix low-address protection for real addresses
The kvm_s390_check_low_addr_protection() function is used only with real addresses. According to the POP (the "Low-Address Protection" paragraph in chapter 3), if the effective address is real or absolute, the low-address protection procedure should raise a PROTECTION exception only when low-address protection is enabled in control register 0 and the address is low. This patch removes the ASCE checks from the function and renames it to better reflect its behavior.

Cc: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Alexander Yarygin <yarygin@linux.vnet.ibm.com>
Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
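In code terms the POP rule is two tests: the low-address-protection control (CR0 bit 35) must be set, and the address must fall in one of the two protected ranges, 0-511 or 4096-4607. A minimal sketch of the range test, modeled on the is_low_address() helper that gaccess.c already provides (reproduced from memory for illustration, so verify against the tree):

/* The protected ranges are 0x0000..0x01ff and 0x1000..0x11ff; clearing
 * exactly those bits and comparing with zero covers both at once. */
static int is_low_address(unsigned long ga)
{
	return (ga & ~0x11fful) == 0;
}

With the ASCE checks gone, the enablement half of the test reduces to reading one bit out of the guest's CR0 image, as the gaccess.c hunk below shows.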
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/gaccess.c	| 11 ++++++-----
-rw-r--r--	arch/s390/kvm/gaccess.h	|  2 +-
-rw-r--r--	arch/s390/kvm/priv.c	|  4 ++--
3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 633fe9bd75a9..c230904429cc 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -697,28 +697,29 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
 }
 
 /**
- * kvm_s390_check_low_addr_protection - check for low-address protection
- * @ga: Guest address
+ * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @gra: Guest real address
  *
  * Checks whether an address is subject to low-address protection and set
  * up vcpu->arch.pgm accordingly if necessary.
  *
  * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
  */
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
 	struct trans_exc_code_bits *tec_bits;
+	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
 
-	if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+	if (!ctlreg0.lap || !is_low_address(gra))
 		return 0;
 
 	memset(pgm, 0, sizeof(*pgm));
 	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
 	tec_bits->fsi = FSI_STORE;
 	tec_bits->as = psw_bits(*psw).as;
-	tec_bits->addr = ga >> PAGE_SHIFT;
+	tec_bits->addr = gra >> PAGE_SHIFT;
 	pgm->code = PGM_PROTECTION;
 
 	return pgm->code;
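The new condition reads the LAP bit straight from the guest's CR0 image rather than calling low_address_protection_enabled(), which also inspected the ASCE. For reference, a simplified sketch of the union ctlreg0 accessor this relies on; the real definition lives in arch/s390/include/asm/ctl_reg.h, and the padding widths here are an assumption with all other CR0 bits omitted:

/* Simplified sketch of union ctlreg0; not the verbatim kernel header. */
union ctlreg0 {
	unsigned long val;
	struct {
		unsigned long     : 35;	/* CR0 bits 0..34, not needed here */
		unsigned long lap : 1;	/* bit 35: low-address-protection control */
		unsigned long     : 28;	/* CR0 bits 36..63 */
	};
};

Because s390 lays bitfields out from the most significant bit, the 35-bit pad places lap at CR0 bit 35, matching the architected position.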
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 0149cf15058a..20de77ed8eba 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -330,6 +330,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 void ipte_lock(struct kvm_vcpu *vcpu);
 void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
 
 #endif /* __KVM_S390_GACCESS_H */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index b982fbca34df..5f2642576797 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -207,7 +207,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
 	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
 	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
 	addr = kvm_s390_logical_to_effective(vcpu, addr);
-	if (kvm_s390_check_low_addr_protection(vcpu, addr))
+	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
 		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
 	addr = kvm_s390_real_to_abs(vcpu, addr);
 
@@ -680,7 +680,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
-		if (kvm_s390_check_low_addr_protection(vcpu, start))
+		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
 			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
 	}
 
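Both call sites run the check while the operand is still a real address, i.e. before kvm_s390_real_to_abs(): low-address protection is applied before prefixing, so the guarded ranges live in the guest's real address space, not the absolute one. Condensed from the handle_test_block() hunk above, with comments added:

	addr = kvm_s390_logical_to_effective(vcpu, addr);
	/* LAP applies before prefixing: check the real address ... */
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	/* ... and only then translate real to absolute. */
	addr = kvm_s390_real_to_abs(vcpu, addr);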