author    James Hogan <james.hogan@imgtec.com>  2016-12-06 09:47:47 -0500
committer James Hogan <james.hogan@imgtec.com>  2017-02-03 10:21:22 -0500
commit    f0c0c330f7bb1a640968798b63c0dffc6a8af0ec (patch)
tree      9474de30fe654ca6208cc7944d0790106f02861d
parent    64ebc9e24074403c4127b06c0203f3e7b3367e69 (diff)
KVM: MIPS/MMU: Add GPA PT mkclean helper
Add a helper function to make a range of guest physical address (GPA)
mappings in the GPA page table clean so that writes can be caught. This
will be used in a few places to manage dirty page logging.

Note that until the dirty bit is transferred from GPA page table entries
to GVA page table entries in an upcoming patch, this won't trigger a TLB
modified exception on write.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
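For context, a minimal sketch of how a caller might use the new helper
when enabling dirty logging for a memslot. This is an illustrative
assumption, not code from this series: the example_start_dirty_logging()
wrapper is hypothetical, and whether kvm_flush_remote_tlbs() is the
right invalidation on MIPS is left to the later patches that wire this
up.

    /* Hypothetical caller (illustration only): write-protect a memslot's
     * GPA range under mmu_lock so subsequent guest writes can be logged.
     */
    static void example_start_dirty_logging(struct kvm *kvm,
                                            const struct kvm_memory_slot *slot)
    {
            gfn_t start_gfn = slot->base_gfn;
            /* @end_gfn is inclusive ("last page in GPA range"). */
            gfn_t end_gfn = slot->base_gfn + slot->npages - 1;

            spin_lock(&kvm->mmu_lock);
            /* A nonzero return means some PTE was actually cleaned, so
             * derived GVA page table and TLB entries are now stale.
             */
            if (kvm_mips_mkclean_gpa_pt(kvm, start_gfn, end_gfn))
                    kvm_flush_remote_tlbs(kvm);
            spin_unlock(&kvm->mmu_lock);
    }

Because kvm_mips_mkclean_gpa_pt() reports whether anything changed, a
caller can skip the expensive remote TLB flush when the range was
already clean.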
-rw-r--r--  arch/mips/include/asm/kvm_host.h |   1 +
-rw-r--r--  arch/mips/kvm/mmu.c              | 124 ++++++++++++++++++++++++++
2 files changed, 125 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 13c9e128bb86..a7394940119c 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -643,6 +643,7 @@ enum kvm_mips_flush {
 };
 void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
+int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
 pgd_t *kvm_pgd_alloc(void);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 934bcc3732da..892fd0ede718 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -304,6 +304,130 @@ bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
                                      end_gfn << PAGE_SHIFT);
 }
 
+#define BUILD_PTE_RANGE_OP(name, op) \
+static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
+                                 unsigned long end) \
+{ \
+        int ret = 0; \
+        int i_min = __pte_offset(start); \
+        int i_max = __pte_offset(end); \
+        int i; \
+        pte_t old, new; \
+ \
+        for (i = i_min; i <= i_max; ++i) { \
+                if (!pte_present(pte[i])) \
+                        continue; \
+ \
+                old = pte[i]; \
+                new = op(old); \
+                if (pte_val(new) == pte_val(old)) \
+                        continue; \
+                set_pte(pte + i, new); \
+                ret = 1; \
+        } \
+        return ret; \
+} \
+ \
+/* returns true if anything was done */ \
+static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
+                                 unsigned long end) \
+{ \
+        int ret = 0; \
+        pte_t *pte; \
+        unsigned long cur_end = ~0ul; \
+        int i_min = __pmd_offset(start); \
+        int i_max = __pmd_offset(end); \
+        int i; \
+ \
+        for (i = i_min; i <= i_max; ++i, start = 0) { \
+                if (!pmd_present(pmd[i])) \
+                        continue; \
+ \
+                pte = pte_offset(pmd + i, 0); \
+                if (i == i_max) \
+                        cur_end = end; \
+ \
+                ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
+        } \
+        return ret; \
+} \
+ \
+static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
+                                 unsigned long end) \
+{ \
+        int ret = 0; \
+        pmd_t *pmd; \
+        unsigned long cur_end = ~0ul; \
+        int i_min = __pud_offset(start); \
+        int i_max = __pud_offset(end); \
+        int i; \
+ \
+        for (i = i_min; i <= i_max; ++i, start = 0) { \
+                if (!pud_present(pud[i])) \
+                        continue; \
+ \
+                pmd = pmd_offset(pud + i, 0); \
+                if (i == i_max) \
+                        cur_end = end; \
+ \
+                ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
+        } \
+        return ret; \
+} \
+ \
+static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
+                                 unsigned long end) \
+{ \
+        int ret = 0; \
+        pud_t *pud; \
+        unsigned long cur_end = ~0ul; \
+        int i_min = pgd_index(start); \
+        int i_max = pgd_index(end); \
+        int i; \
+ \
+        for (i = i_min; i <= i_max; ++i, start = 0) { \
+                if (!pgd_present(pgd[i])) \
+                        continue; \
+ \
+                pud = pud_offset(pgd + i, 0); \
+                if (i == i_max) \
+                        cur_end = end; \
+ \
+                ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
+        } \
+        return ret; \
+}
+
+/*
+ * kvm_mips_mkclean_gpa_pt.
+ * Mark a range of guest physical address space clean (writes fault) in the VM's
+ * GPA page table to allow dirty page tracking.
+ */
+
+BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
+
+/**
+ * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
+ * @kvm:        KVM pointer.
+ * @start_gfn:  Guest frame number of first page in GPA range to flush.
+ * @end_gfn:    Guest frame number of last page in GPA range to flush.
+ *
+ * Make a range of GPA mappings clean so that guest writes will fault and
+ * trigger dirty page logging.
+ *
+ * The caller must hold the @kvm->mmu_lock spinlock.
+ *
+ * Returns:     Whether any GPA mappings were modified, which would require
+ *              derived mappings (GVA page tables & TLB entries) to be
+ *              invalidated.
+ */
+int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
+{
+        return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
+                                    start_gfn << PAGE_SHIFT,
+                                    end_gfn << PAGE_SHIFT);
+}
+
 /**
  * kvm_mips_map_page() - Map a guest physical page.
  * @vcpu:       VCPU pointer.