author     Paul Mackerras <paulus@samba.org>    2015-06-24 07:18:05 -0400
committer  Alexander Graf <agraf@suse.de>       2015-08-22 05:16:18 -0400
commit     1e5bf454f58731e360e504253e85bae7aaa2d298 (patch)
tree       06e2668ccf5a62c069a94147f8c08debf36d50fc /arch/powerpc
parent     b4deba5c41e9f6d3239606c9e060853d9decfee1 (diff)
KVM: PPC: Book3S HV: Fix race in reading change bit when removing HPTE
The reference (R) and change (C) bits in a HPT entry can be set by
hardware at any time up until the HPTE is invalidated and the TLB
invalidation sequence has completed. This means that when removing a
HPTE, we need to read the HPTE after the invalidation sequence has
completed in order to obtain reliable values of R and C. The code in
kvmppc_do_h_remove() used to do this. However, commit 6f22bd3265fb
("KVM: PPC: Book3S HV: Make HTAB code LE host aware") removed the read
after invalidation as a side effect of other changes. This restores the
read of the HPTE after invalidation.

The user-visible effect of this bug would be that when migrating a
guest, there is a small probability that a page modified by the guest
and then unmapped by the guest might not get re-transmitted and thus
the destination might end up with a stale copy of the page.

Fixes: 6f22bd3265fb ("KVM: PPC: Book3S HV: Make HTAB code LE host aware")
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index b027a89737b6..c6d601cc9764 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -421,14 +421,20 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
 	v = pte & ~HPTE_V_HVLOCK;
 	if (v & HPTE_V_VALID) {
-		u64 pte1;
-
-		pte1 = be64_to_cpu(hpte[1]);
 		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
-		rb = compute_tlbie_rb(v, pte1, pte_index);
+		rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
 		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
-		/* Read PTE low word after tlbie to get final R/C values */
-		remove_revmap_chain(kvm, pte_index, rev, v, pte1);
+		/*
+		 * The reference (R) and change (C) bits in a HPT
+		 * entry can be set by hardware at any time up until
+		 * the HPTE is invalidated and the TLB invalidation
+		 * sequence has completed. This means that when
+		 * removing a HPTE, we need to re-read the HPTE after
+		 * the invalidation sequence has completed in order to
+		 * obtain reliable values of R and C.
+		 */
+		remove_revmap_chain(kvm, pte_index, rev, v,
+				    be64_to_cpu(hpte[1]));
 	}
 	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
 	note_hpte_modification(kvm, rev);
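
For readers following the race, here is a minimal, self-contained C sketch of
the ordering the patch restores. The names tlb_invalidate_sequence() and
remove_hpte_sketch(), and the simplified HPTE_* masks, are illustrative
stand-ins rather than the kernel's definitions; the point is only that the
second HPTE doubleword is read after the invalidation sequence completes, so
any R/C update the hardware makes up to that point is observed.

#include <stdint.h>

#define HPTE_V_VALID 0x1ULL
#define HPTE_R_R     0x100ULL   /* reference bit in the second doubleword */
#define HPTE_R_C     0x080ULL   /* change bit in the second doubleword */

/* Stub for the tlbie/eieio/tlbsync/ptesync sequence. */
static void tlb_invalidate_sequence(volatile uint64_t *hpte)
{
	(void)hpte;   /* real code issues tlbie and waits for completion */
}

/* Remove one HPTE and return its final R/C bits (sketch only). */
static uint64_t remove_hpte_sketch(volatile uint64_t *hpte)
{
	hpte[0] &= ~HPTE_V_VALID;          /* 1. clear the valid bit */
	tlb_invalidate_sequence(hpte);     /* 2. complete the TLB invalidation */

	/*
	 * 3. Only now are R and C final; reading hpte[1] before step 2,
	 *    as the pre-fix code did, can miss a late change-bit update.
	 */
	return hpte[1] & (HPTE_R_R | HPTE_R_C);
}

int main(void)
{
	uint64_t hpte[2] = { HPTE_V_VALID, HPTE_R_C };  /* toy entry, C already set */
	return (remove_hpte_sketch(hpte) & HPTE_R_C) ? 0 : 1;
}

In the patched kernel code the value read after do_tlbies() is the one handed
to remove_revmap_chain(), so the final C bit is what migration's dirty
tracking ultimately sees; reading it before the invalidation, as the pre-fix
code did, is what allowed a modified-then-unmapped page to reach the
destination stale.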