Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_hv.c   24
1 file changed, 24 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 0aa40734c8f6..1029e2201bf6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -46,6 +46,7 @@
 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 			long pte_index, unsigned long pteh,
 			unsigned long ptel, unsigned long *pte_idx_ret);
+static void kvmppc_rmap_reset(struct kvm *kvm);
 
 long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
@@ -144,6 +145,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
 	/* Set the entire HPT to 0, i.e. invalid HPTEs */
 	memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
 	/*
+	 * Reset all the reverse-mapping chains for all memslots
+	 */
+	kvmppc_rmap_reset(kvm);
+	/*
 	 * Set the whole last_vcpu array to an invalid vcpu number.
 	 * This ensures that each vcpu will flush its TLB on next entry.
 	 */
@@ -772,6 +777,25 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	goto out_put;
 }
 
+static void kvmppc_rmap_reset(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int srcu_idx;
+
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	slots = kvm->memslots;
+	kvm_for_each_memslot(memslot, slots) {
+		/*
+		 * This assumes it is acceptable to lose reference and
+		 * change bits across a reset.
+		 */
+		memset(memslot->arch.rmap, 0,
+		       memslot->npages * sizeof(*memslot->arch.rmap));
+	}
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+}
+
 static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long start,
 				unsigned long end,
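
Note on the pattern used above: the new kvmppc_rmap_reset() helper walks every memory slot under the SRCU read lock that guards kvm->memslots and zeroes each slot's reverse-map array, so no stale rmap chain heads survive the HPT reset done in kvmppc_alloc_reset_hpt(). The standalone sketch below mirrors that pattern with plain C types for illustration only; struct memory_slot, struct memslots and rmap_reset() are hypothetical stand-ins, not kernel definitions, and the SRCU protection is noted in a comment rather than reproduced.

#include <stddef.h>
#include <string.h>

/* Illustrative stand-ins for the kernel's memslot structures (not kernel code). */
struct memory_slot {
	unsigned long npages;
	unsigned long *rmap;	/* one reverse-map word per guest page */
};

struct memslots {
	size_t nslots;
	struct memory_slot *slots;
};

/*
 * Analogue of kvmppc_rmap_reset(): clear every slot's reverse-map array
 * so that nothing points at entries of the freshly zeroed HPT.  The real
 * code walks the slots between srcu_read_lock(&kvm->srcu) and
 * srcu_read_unlock() so the memslot array stays stable during the walk.
 */
static void rmap_reset(struct memslots *ms)
{
	for (size_t i = 0; i < ms->nslots; i++) {
		struct memory_slot *slot = &ms->slots[i];

		/* Reference/change bits are deliberately lost across a reset. */
		memset(slot->rmap, 0, slot->npages * sizeof(*slot->rmap));
	}
}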