about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h    13
-rw-r--r--  arch/arm/kvm/mmu.c                10
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h   4
3 files changed, 22 insertions(+), 5 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 6d0f3d3023b7..891afe78311a 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -114,6 +114,19 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 	pmd_val(*pmd) |= L_PMD_S2_RDWR;
 }
 
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end)					\
+({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+
+#define kvm_pud_addr_end(addr,end)	(end)
+
+#define kvm_pmd_addr_end(addr, end)					\
+({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+
 struct kvm;
 
 static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index fc71a8df0e13..c1c08b240f35 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -145,7 +145,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 		if (pud_none(*pud)) {
-			addr = pud_addr_end(addr, end);
+			addr = kvm_pud_addr_end(addr, end);
 			continue;
 		}
 
@@ -155,13 +155,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 			 * move on.
 			 */
 			clear_pud_entry(kvm, pud, addr);
-			addr = pud_addr_end(addr, end);
+			addr = kvm_pud_addr_end(addr, end);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr = pmd_addr_end(addr, end);
+			addr = kvm_pmd_addr_end(addr, end);
 			continue;
 		}
 
@@ -176,10 +176,10 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 		 */
 		if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
 			clear_pmd_entry(kvm, pmd, addr);
-			next = pmd_addr_end(addr, end);
+			next = kvm_pmd_addr_end(addr, end);
 			if (page_empty(pmd) && !page_empty(pud)) {
 				clear_pud_entry(kvm, pud, addr);
-				next = pud_addr_end(addr, end);
+				next = kvm_pud_addr_end(addr, end);
 			}
 		}
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6eaf69b5e42c..00c0cc8b8045 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -121,6 +121,10 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 	pmd_val(*pmd) |= PMD_S2_RDWR;
 }
 
+#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
+#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
+#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))