aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/arm/include/asm/kvm_mmu.h4
-rw-r--r--arch/arm/kvm/mmu.c4
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h16
3 files changed, 16 insertions, 8 deletions
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 2d122adcdb22..6d0f3d3023b7 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -116,8 +116,8 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
-					      unsigned long size)
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+					     unsigned long size)
 {
 	/*
 	 * If we are going to insert an instruction page and the icache is
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7789857d1470..fc71a8df0e13 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -715,7 +715,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
+		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
@@ -723,7 +723,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
+		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
 	}
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7f1f9408ff66..6eaf69b5e42c 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -106,7 +106,6 @@ static inline bool kvm_is_write_fault(unsigned long esr)
 	return true;
 }
 
-static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
 static inline void kvm_clean_pgd(pgd_t *pgd) {}
 static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
 static inline void kvm_clean_pte(pte_t *pte) {}
@@ -124,9 +123,19 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
-					      unsigned long size)
+#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
+	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+					     unsigned long size)
+{
+	if (!vcpu_has_cache_enabled(vcpu))
+		kvm_flush_dcache_to_poc((void *)hva, size);
+
 	if (!icache_is_aliasing()) {		/* PIPT */
 		flush_icache_range(hva, hva + size);
 	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
@@ -135,7 +144,6 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
135 } 144 }
136} 145}
137 146
138#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
139#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) 147#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
140 148
141#endif /* __ASSEMBLY__ */ 149#endif /* __ASSEMBLY__ */