author     Suzuki K Poulose <suzuki.poulose@arm.com>  2016-03-23 08:08:02 -0400
committer  Christoffer Dall <christoffer.dall@linaro.org>  2016-04-21 08:58:18 -0400
commit     7a1c831ee8553b8199f21183942a46adf808f174 (patch)
tree       1cff3cdb3b14bf0636acfab9a40addf39122461e /arch/arm/kvm
parent     64f324979210d4064adf64f19da40c125c9dd137 (diff)
kvm-arm: Add stage2 page table modifiers
Now that the hyp page table is handled by a different set of routines, rename the original shared routines to stage2 handlers. Also make explicit use of the stage2 page table helpers. unmap_range has been merged into the existing unmap_stage2_range.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
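For reference, the stage2_*() accessors that the rewritten walkers rely on mirror the generic page table API one for one. A minimal sketch of that mapping, assuming the 32-bit ARM case where the stage2 tables share the host page table format (the stage2_*() names appear in the diff below; the wrapper bodies here are an illustrative assumption, not code taken from this patch):

#define stage2_pgd_none(pgd)              pgd_none(pgd)
#define stage2_pgd_clear(pgd)             pgd_clear(pgd)
#define stage2_pgd_addr_end(addr, end)    pgd_addr_end(addr, end)
#define stage2_pud_offset(pgd, addr)      pud_offset(pgd, addr)
#define stage2_pud_free(pud)              pud_free(NULL, pud)
#define stage2_pmd_offset(pud, addr)      pmd_offset(pud, addr)
#define stage2_pmd_free(pmd)              pmd_free(NULL, pmd)

Writing the walkers purely in terms of such wrappers keeps the shared C code independent of the host page table layout, so an architecture whose stage2 layout differs from the host one only needs to provide its own set of stage2_*() helpers.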
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--    arch/arm/kvm/mmu.c    97
1 file changed, 44 insertions(+), 53 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index af526f67022c..f2a6d9b8ca2d 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -152,26 +152,26 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
         return p;
 }
 
-static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
+static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
-        pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
-        pgd_clear(pgd);
+        pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
+        stage2_pgd_clear(pgd);
         kvm_tlb_flush_vmid_ipa(kvm, addr);
-        pud_free(NULL, pud_table);
+        stage2_pud_free(pud_table);
         put_page(virt_to_page(pgd));
 }
 
-static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
+static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-        pmd_t *pmd_table = pmd_offset(pud, 0);
-        VM_BUG_ON(pud_huge(*pud));
-        pud_clear(pud);
+        pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
+        VM_BUG_ON(stage2_pud_huge(*pud));
+        stage2_pud_clear(pud);
         kvm_tlb_flush_vmid_ipa(kvm, addr);
-        pmd_free(NULL, pmd_table);
+        stage2_pmd_free(pmd_table);
         put_page(virt_to_page(pud));
 }
 
-static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
+static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
         pte_t *pte_table = pte_offset_kernel(pmd, 0);
         VM_BUG_ON(pmd_thp_or_huge(*pmd));
@@ -201,7 +201,7 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
  * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
  * the IO subsystem will never hit in the cache.
  */
-static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
                        phys_addr_t addr, phys_addr_t end)
 {
         phys_addr_t start_addr = addr;
@@ -223,19 +223,19 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                 }
         } while (pte++, addr += PAGE_SIZE, addr != end);
 
-        if (kvm_pte_table_empty(kvm, start_pte))
-                clear_pmd_entry(kvm, pmd, start_addr);
+        if (stage2_pte_table_empty(start_pte))
+                clear_stage2_pmd_entry(kvm, pmd, start_addr);
 }
 
-static void unmap_pmds(struct kvm *kvm, pud_t *pud,
+static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
                        phys_addr_t addr, phys_addr_t end)
 {
         phys_addr_t next, start_addr = addr;
         pmd_t *pmd, *start_pmd;
 
-        start_pmd = pmd = pmd_offset(pud, addr);
+        start_pmd = pmd = stage2_pmd_offset(pud, addr);
         do {
-                next = kvm_pmd_addr_end(addr, end);
+                next = stage2_pmd_addr_end(addr, end);
                 if (!pmd_none(*pmd)) {
                         if (pmd_thp_or_huge(*pmd)) {
                                 pmd_t old_pmd = *pmd;
@@ -247,57 +247,64 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 
                                 put_page(virt_to_page(pmd));
                         } else {
-                                unmap_ptes(kvm, pmd, addr, next);
+                                unmap_stage2_ptes(kvm, pmd, addr, next);
                         }
                 }
         } while (pmd++, addr = next, addr != end);
 
-        if (kvm_pmd_table_empty(kvm, start_pmd))
-                clear_pud_entry(kvm, pud, start_addr);
+        if (stage2_pmd_table_empty(start_pmd))
+                clear_stage2_pud_entry(kvm, pud, start_addr);
 }
 
-static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
+static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
                        phys_addr_t addr, phys_addr_t end)
 {
         phys_addr_t next, start_addr = addr;
         pud_t *pud, *start_pud;
 
-        start_pud = pud = pud_offset(pgd, addr);
+        start_pud = pud = stage2_pud_offset(pgd, addr);
         do {
-                next = kvm_pud_addr_end(addr, end);
-                if (!pud_none(*pud)) {
-                        if (pud_huge(*pud)) {
+                next = stage2_pud_addr_end(addr, end);
+                if (!stage2_pud_none(*pud)) {
+                        if (stage2_pud_huge(*pud)) {
                                 pud_t old_pud = *pud;
 
-                                pud_clear(pud);
+                                stage2_pud_clear(pud);
                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
-
                                 kvm_flush_dcache_pud(old_pud);
-
                                 put_page(virt_to_page(pud));
                         } else {
-                                unmap_pmds(kvm, pud, addr, next);
+                                unmap_stage2_pmds(kvm, pud, addr, next);
                         }
                 }
         } while (pud++, addr = next, addr != end);
 
-        if (kvm_pud_table_empty(kvm, start_pud))
-                clear_pgd_entry(kvm, pgd, start_addr);
+        if (stage2_pud_table_empty(start_pud))
+                clear_stage2_pgd_entry(kvm, pgd, start_addr);
 }
 
-
-static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
-                        phys_addr_t start, u64 size)
+/**
+ * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * @kvm: The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size: The size of the area to unmap
+ *
+ * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
+ * be called while holding mmu_lock (unless for freeing the stage2 pgd before
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
         pgd_t *pgd;
         phys_addr_t addr = start, end = start + size;
         phys_addr_t next;
 
-        pgd = pgdp + kvm_pgd_index(addr);
+        pgd = kvm->arch.pgd + stage2_pgd_index(addr);
         do {
-                next = kvm_pgd_addr_end(addr, end);
-                if (!pgd_none(*pgd))
-                        unmap_puds(kvm, pgd, addr, next);
+                next = stage2_pgd_addr_end(addr, end);
+                if (!stage2_pgd_none(*pgd))
+                        unmap_stage2_puds(kvm, pgd, addr, next);
         } while (pgd++, addr = next, addr != end);
 }
 
@@ -792,22 +799,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
         return 0;
 }
 
-/**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
- * @kvm: The VM pointer
- * @start: The intermediate physical base address of the range to unmap
- * @size: The size of the area to unmap
- *
- * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
- * be called while holding mmu_lock (unless for freeing the stage2 pgd before
- * destroying the VM), otherwise another faulting VCPU may come in and mess
- * with things behind our backs.
- */
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
-{
-        unmap_range(kvm, kvm->arch.pgd, start, size);
-}
-
 static void stage2_unmap_memslot(struct kvm *kvm,
                                  struct kvm_memory_slot *memslot)
 {
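As the kernel-doc block that now sits above unmap_stage2_range() spells out, the function is meant to run with mmu_lock held except on the stage2 pgd teardown path. A hedged sketch of the expected calling pattern (example_unmap_guest_range() and its body are an illustrative assumption, not code from this patch):

static void example_unmap_guest_range(struct kvm *kvm, phys_addr_t ipa, u64 size)
{
        /* Serialize against other VCPUs faulting in stage2 mappings. */
        spin_lock(&kvm->mmu_lock);
        unmap_stage2_range(kvm, ipa, size);
        spin_unlock(&kvm->mmu_lock);
}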