Diffstat (limited to 'arch/arm/kvm/mmu.c')

 arch/arm/kvm/mmu.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 102 insertions(+), 8 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7789857d1470..80bb1e6c2c29 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -144,8 +144,9 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	while (addr < end) {
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
+		pte = NULL;
 		if (pud_none(*pud)) {
-			addr = pud_addr_end(addr, end);
+			addr = kvm_pud_addr_end(addr, end);
 			continue;
 		}
 
@@ -155,13 +156,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 			 * move on.
 			 */
 			clear_pud_entry(kvm, pud, addr);
-			addr = pud_addr_end(addr, end);
+			addr = kvm_pud_addr_end(addr, end);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr = pmd_addr_end(addr, end);
+			addr = kvm_pmd_addr_end(addr, end);
 			continue;
 		}
 
@@ -174,12 +175,12 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 		/*
 		 * If the pmd entry is to be cleared, walk back up the ladder
 		 */
-		if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
+		if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) {
 			clear_pmd_entry(kvm, pmd, addr);
-			next = pmd_addr_end(addr, end);
+			next = kvm_pmd_addr_end(addr, end);
 			if (page_empty(pmd) && !page_empty(pud)) {
 				clear_pud_entry(kvm, pud, addr);
-				next = pud_addr_end(addr, end);
+				next = kvm_pud_addr_end(addr, end);
 			}
 		}
 
@@ -187,6 +188,99 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	}
 }
 
+static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+			      phys_addr_t addr, phys_addr_t end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		if (!pte_none(*pte)) {
+			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
+			      phys_addr_t addr, phys_addr_t end)
+{
+	pmd_t *pmd;
+	phys_addr_t next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = kvm_pmd_addr_end(addr, end);
+		if (!pmd_none(*pmd)) {
+			if (kvm_pmd_huge(*pmd)) {
+				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
+			} else {
+				stage2_flush_ptes(kvm, pmd, addr, next);
+			}
+		}
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
+			      phys_addr_t addr, phys_addr_t end)
+{
+	pud_t *pud;
+	phys_addr_t next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = kvm_pud_addr_end(addr, end);
+		if (!pud_none(*pud)) {
+			if (pud_huge(*pud)) {
+				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
+			} else {
+				stage2_flush_pmds(kvm, pud, addr, next);
+			}
+		}
+	} while (pud++, addr = next, addr != end);
+}
+
+static void stage2_flush_memslot(struct kvm *kvm,
+				 struct kvm_memory_slot *memslot)
+{
+	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+	phys_addr_t next;
+	pgd_t *pgd;
+
+	pgd = kvm->arch.pgd + pgd_index(addr);
+	do {
+		next = kvm_pgd_addr_end(addr, end);
+		stage2_flush_puds(kvm, pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the stage 2 page tables and invalidate any cache lines
+ * backing memory already mapped to the VM.
+ */
+void stage2_flush_vm(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots)
+		stage2_flush_memslot(kvm, memslot);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
 /**
  * free_boot_hyp_pgd - free HYP boot page tables
  *
@@ -715,7 +809,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
+		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
@@ -723,7 +817,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
+		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
 	}
 
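
Note: all of the stage2_flush_* walkers added above share one idiom: clamp the
current table entry's coverage to the end of the range with a
kvm_p*d_addr_end() helper, process the entry, then advance with
"(p++, addr = next, addr != end)". Below is a minimal, standalone userspace
sketch of that clamping walk. The EX_* constants and ex_pmd_addr_end() are
made-up stand-ins for the kernel's macros, and the sketch omits the overflow
guard the real kvm_p*d_addr_end() helpers carry; it only illustrates the loop
shape, not the kernel API.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical sizes for illustration only: 4 KiB pages, 2 MiB "pmd" blocks. */
#define EX_PMD_SIZE	(2ULL << 20)
#define EX_PMD_MASK	(~(EX_PMD_SIZE - 1))

/*
 * Clamp the end of the current block to the end of the overall range,
 * mirroring what kvm_pmd_addr_end()/kvm_pud_addr_end() do (minus their
 * wrap-around protection).
 */
static uint64_t ex_pmd_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + EX_PMD_SIZE) & EX_PMD_MASK;

	return boundary < end ? boundary : end;
}

int main(void)
{
	uint64_t addr = 0x100000, end = 0x500000;
	uint64_t next;

	/*
	 * The same do/while shape as stage2_flush_pmds(): visit each
	 * block-sized chunk, never stepping past 'end'.
	 */
	do {
		next = ex_pmd_addr_end(addr, end);
		printf("flush [%#llx, %#llx)\n",
		       (unsigned long long)addr, (unsigned long long)next);
	} while ((addr = next) != end);

	return 0;
}

Run standalone, this prints three chunks, [0x100000, 0x200000),
[0x200000, 0x400000) and [0x400000, 0x500000), showing why the first and last
iterations of such a walk can cover less than a full block when the range is
not block-aligned.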