author    Paul Mackerras <paulus@samba.org>    2012-11-21 18:27:19 -0500
committer Alexander Graf <agraf@suse.de>       2012-12-05 19:33:58 -0500
commit    a64fd707481631b9682f9baeefac489bc55bbf73 (patch)
tree      362ad5936a0629a514a5dc8627e9521617b62047 /arch/powerpc/kvm
parent    a2932923ccf63c419c77aaa18ac09be98f2c94d8 (diff)
KVM: PPC: Book3S HV: Reset reverse-map chains when resetting the HPT
With HV-style KVM, we maintain reverse-mapping lists that enable us to find all the HPT (hashed page table) entries that reference each guest physical page, with the heads of the lists in the memslot->arch.rmap arrays. When we reset the HPT (i.e. when we reboot the VM), we clear out all the HPT entries but were not clearing out the reverse-mapping lists. The result is that as we create new HPT entries, the lists get corrupted, which can easily lead to loops, resulting in the host kernel hanging when it tries to traverse those lists.

This fixes the problem by zeroing out all the reverse-mapping lists when we zero out the HPT. This incidentally means that we are also zeroing our record of the referenced and changed bits (not the bits in the Linux PTEs, used by the Linux MM subsystem, but the bits used by the KVM_GET_DIRTY_LOG ioctl, and those used by kvm_age_hva() and kvm_test_age_hva()).

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
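For illustration only (not part of the commit): the sketch below is a minimal user-space model of the failure mode. It deliberately simplifies the chain layout -- in the real code the rmap word packs lock, present, referenced and changed bits plus a head index, and the chain links live in a separate revmap array -- and all names in it (rmap_head, next[], insert_hpte) are hypothetical. The corruption mechanism is the same: the per-page list heads survive a reset that wipes everything they point into.

/*
 * Toy model of the bug fixed here. rmap_head stands in for
 * memslot->arch.rmap[gfn]; next[] stands in for the chain links
 * that the HPT reset invalidates.
 */
#include <stdio.h>

#define NPTES 4
#define NIL  -1

static int rmap_head = NIL;  /* per-page list head; survives the "reset" */
static int next[NPTES];      /* chain links; invalidated by the "reset" */

static void insert_hpte(int idx)
{
        /* Link a new HPTE at the head of the page's chain. */
        next[idx] = rmap_head;
        rmap_head = idx;
}

int main(void)
{
        for (int i = 0; i < NPTES; i++)
                next[i] = NIL;

        insert_hpte(2);              /* chain: 2 */

        /*
         * "Reset the HPT": wipe the chain links but not the list head,
         * which is what happened before this patch.
         */
        for (int i = 0; i < NPTES; i++)
                next[i] = NIL;

        insert_hpte(1);              /* chain: 1 -> stale 2 */
        insert_hpte(2);              /* chain: 2 -> 1 -> 2 -> ... a loop */

        /* Traversal now cycles forever; cap it so the demo terminates. */
        int steps = 0;
        for (int i = rmap_head; i != NIL && steps < 10; i = next[i], steps++)
                printf("visit HPTE %d\n", i);
        if (steps == 10)
                printf("loop: traversal did not terminate\n");
        return 0;
}

This is why kvmppc_rmap_reset() in the diff below zeroes the list heads at the same time the HPT itself is zeroed.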
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 0aa40734c8f6..1029e2201bf6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -46,6 +46,7 @@
 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
                        long pte_index, unsigned long pteh,
                        unsigned long ptel, unsigned long *pte_idx_ret);
+static void kvmppc_rmap_reset(struct kvm *kvm);
 
 long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
@@ -144,6 +145,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
        /* Set the entire HPT to 0, i.e. invalid HPTEs */
        memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
        /*
+        * Reset all the reverse-mapping chains for all memslots
+        */
+       kvmppc_rmap_reset(kvm);
+       /*
         * Set the whole last_vcpu array to an invalid vcpu number.
         * This ensures that each vcpu will flush its TLB on next entry.
         */
@@ -772,6 +777,25 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        goto out_put;
 }
 
+static void kvmppc_rmap_reset(struct kvm *kvm)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int srcu_idx;
+
+       srcu_idx = srcu_read_lock(&kvm->srcu);
+       slots = kvm->memslots;
+       kvm_for_each_memslot(memslot, slots) {
+               /*
+                * This assumes it is acceptable to lose reference and
+                * change bits across a reset.
+                */
+               memset(memslot->arch.rmap, 0,
+                      memslot->npages * sizeof(*memslot->arch.rmap));
+       }
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+}
+
 static int kvm_handle_hva_range(struct kvm *kvm,
                                unsigned long start,
                                unsigned long end,