path: root/arch/powerpc/kvm/book3s_64_mmu_hv.c
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_hv.c')
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_hv.c   51
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d03eb6f7b05..d95d11322a1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -705,7 +705,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 goto out_unlock;
         hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
 
-        rmap = &memslot->rmap[gfn - memslot->base_gfn];
+        rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
         lock_rmap(rmap);
 
         /* Check if we might have been invalidated; let the guest retry if so */
@@ -756,9 +756,12 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         goto out_put;
 }
 
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
-                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
-                                         unsigned long gfn))
+static int kvm_handle_hva_range(struct kvm *kvm,
+                                unsigned long start,
+                                unsigned long end,
+                                int (*handler)(struct kvm *kvm,
+                                               unsigned long *rmapp,
+                                               unsigned long gfn))
 {
         int ret;
         int retval = 0;
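
(Aside, not part of the diff: the handler callback type in the new kvm_handle_hva_range() prototype above is unchanged by this patch. Purely for illustration, a hypothetical no-op handler with that signature, and how it would be passed in, might look like the sketch below; kvm_noop_rmapp is invented here and does not exist in the kernel.)

static int kvm_noop_rmapp(struct kvm *kvm, unsigned long *rmapp,
                          unsigned long gfn)
{
        /* A real handler (e.g. kvm_unmap_rmapp) would walk the rmap chain
         * at *rmapp for this gfn; returning 0 reports nothing to flush. */
        return 0;
}

/* Hypothetical call site: kvm_handle_hva_range(kvm, start, end, kvm_noop_rmapp); */
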
@@ -767,15 +770,25 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 
         slots = kvm_memslots(kvm);
         kvm_for_each_memslot(memslot, slots) {
-                unsigned long start = memslot->userspace_addr;
-                unsigned long end;
+                unsigned long hva_start, hva_end;
+                gfn_t gfn, gfn_end;
 
-                end = start + (memslot->npages << PAGE_SHIFT);
-                if (hva >= start && hva < end) {
-                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+                hva_start = max(start, memslot->userspace_addr);
+                hva_end = min(end, memslot->userspace_addr +
+                              (memslot->npages << PAGE_SHIFT));
+                if (hva_start >= hva_end)
+                        continue;
+                /*
+                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+                 * {gfn, gfn+1, ..., gfn_end-1}.
+                 */
+                gfn = hva_to_gfn_memslot(hva_start, memslot);
+                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+                for (; gfn < gfn_end; ++gfn) {
+                        gfn_t gfn_offset = gfn - memslot->base_gfn;
 
-                        ret = handler(kvm, &memslot->rmap[gfn_offset],
-                                      memslot->base_gfn + gfn_offset);
+                        ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
                         retval |= ret;
                 }
         }
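
(Aside, not part of the diff: the hva_end + PAGE_SIZE - 1 rounding above is what pulls the last partially covered page into the gfn range. A minimal user-space sketch of that arithmetic follows, assuming hva_to_gfn_memslot() is the usual base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT) mapping; all values are made up for illustration.)

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

struct memslot {
        unsigned long base_gfn;
        unsigned long userspace_addr;
        unsigned long npages;
};

static unsigned long hva_to_gfn(unsigned long hva, struct memslot *s)
{
        return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
        struct memslot s = {
                .base_gfn = 0x100,
                .userspace_addr = 0x40000000UL,
                .npages = 16,
        };
        unsigned long hva_start = 0x40001800UL;   /* inside page 1 of the slot */
        unsigned long hva_end   = 0x40003800UL;   /* exclusive end, inside page 3 */

        /* Same rounding as the patch: gfn_end is one past the last page touched. */
        unsigned long gfn     = hva_to_gfn(hva_start, &s);
        unsigned long gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, &s);

        /* Prints: gfn range 0x101..0x104 (exclusive), i.e. pages 1, 2 and 3. */
        printf("gfn range 0x%lx..0x%lx (exclusive)\n", gfn, gfn_end);
        return 0;
}
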
@@ -783,6 +796,13 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
         return retval;
 }
 
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+                                         unsigned long gfn))
+{
+        return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
+}
+
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                            unsigned long gfn)
 {
@@ -850,6 +870,13 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
         return 0;
 }
 
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+        if (kvm->arch.using_mmu_notifiers)
+                kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
+        return 0;
+}
+
 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                          unsigned long gfn)
 {
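
(Aside, not part of the diff: kvm_unmap_hva_range() is meant to be driven by the generic KVM MMU notifier when a whole range of host virtual memory is invalidated, so one call covers every backed guest page instead of one kvm_unmap_hva() call per page. A simplified, hypothetical caller sketch; the function name is invented, and the locking and flush handling of the real notifier in virt/kvm/kvm_main.c are intentionally omitted.)

static void example_invalidate_range_start(struct kvm *kvm,
                                           unsigned long start,
                                           unsigned long end)
{
        /* One call now handles the entire [start, end) host address range. */
        kvm_unmap_hva_range(kvm, start, end);
}
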
@@ -1009,7 +1036,7 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
         unsigned long *rmapp, *map;
 
         preempt_disable();
-        rmapp = memslot->rmap;
+        rmapp = memslot->arch.rmap;
         map = memslot->dirty_bitmap;
         for (i = 0; i < memslot->npages; ++i) {
                 if (kvm_test_clear_dirty(kvm, rmapp))