author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2014-09-09 06:27:09 -0400
committer	Marc Zyngier <marc.zyngier@arm.com>	2014-09-11 06:31:13 -0400
commit		a7d079cea2dffb112e26da2566dd84c0ef1fce97 (patch)
tree		934d671f75dd5aa1f30b0f9a5fdd18392a1b6528
parent		de56fb1923ca11f428bf557870e0faa99f38762e (diff)
ARM/arm64: KVM: fix use of WnR bit in kvm_is_write_fault()
The ISS encoding for an exception from a Data Abort has a WnR bit[6] that
indicates whether the Data Abort was caused by a read or a write
instruction. While there are several fields in the encoding that are only
valid if the ISV bit[24] is set, WnR is not one of them, so we can read it
unconditionally.

Instead of fixing both implementations of kvm_is_write_fault() in place,
reimplement it just once using kvm_vcpu_dabt_iswrite(), which already does
the right thing with respect to the WnR bit. Also fix up the callers to
pass 'vcpu'.

Acked-by: Laszlo Ersek <lersek@redhat.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
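As a side note on the ISS layout described above, here is a minimal standalone sketch (not kernel code) of the two ways of deriving a write fault from the Data Abort syndrome. The bit positions come from the description (WnR at bit 6, ISV at bit 24); the mask and function names are hypothetical, chosen only for the example.

```c
/*
 * Standalone sketch only -- not kernel code.  Bit positions are those
 * quoted in the commit message: WnR is ISS bit 6, ISV is ISS bit 24.
 * The mask and function names are hypothetical, for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

#define ISS_ISV	(UINT32_C(1) << 24)	/* Instruction Syndrome Valid */
#define ISS_WNR	(UINT32_C(1) << 6)	/* Write-not-Read */

/* Old approach: only trust WnR when ISV is set, else assume a write. */
static bool iss_is_write_old(uint32_t iss)
{
	if ((iss & ISS_ISV) && !(iss & ISS_WNR))
		return false;
	return true;
}

/* Fixed approach: WnR is valid regardless of ISV, so test it directly. */
static bool iss_is_write_fixed(uint32_t iss)
{
	return (iss & ISS_WNR) != 0;
}
```

With ISV clear, the old check always falls through to "write", which is exactly the misclassification the patch removes.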
 arch/arm/include/asm/kvm_mmu.h   | 11 -----------
 arch/arm/kvm/mmu.c               | 12 ++++++++++--
 arch/arm64/include/asm/kvm_mmu.h | 13 -------------
 3 files changed, 10 insertions(+), 26 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 5cc0b0f5f72f..3f688b458143 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -78,17 +78,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 	flush_pmd_entry(pte);
 }
 
-static inline bool kvm_is_write_fault(unsigned long hsr)
-{
-	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
-	if (hsr_ec == HSR_EC_IABT)
-		return false;
-	else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
-		return false;
-	else
-		return true;
-}
-
 static inline void kvm_clean_pgd(pgd_t *pgd)
 {
 	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 62f5642153f9..bb06f76a8f89 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -746,6 +746,14 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
 	return false;
 }
 
+static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		return false;
+
+	return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -760,7 +768,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
 
-	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
+	write_fault = kvm_is_write_fault(vcpu);
 	if (fault_status == FSC_PERM && !write_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
 		return -EFAULT;
@@ -886,7 +894,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	gfn = fault_ipa >> PAGE_SHIFT;
 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
-	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
+	write_fault = kvm_is_write_fault(vcpu);
 	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
 		if (is_iabt) {
 			/* Prefetch Abort on I/O address */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 8e138c7c53ac..737da742b293 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -93,19 +93,6 @@ void kvm_clear_hyp_idmap(void);
 #define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
 #define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)
 
-static inline bool kvm_is_write_fault(unsigned long esr)
-{
-	unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
-
-	if (esr_ec == ESR_EL2_EC_IABT)
-		return false;
-
-	if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
-		return false;
-
-	return true;
-}
-
 static inline void kvm_clean_pgd(pgd_t *pgd) {}
 static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
 static inline void kvm_clean_pte(pte_t *pte) {}