summaryrefslogtreecommitdiffstats
path: root/virt
diff options
context:
space:
mode:
authorSean Christopherson <sean.j.christopherson@intel.com>2019-02-05 15:54:17 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2019-02-20 16:48:32 -0500
commit152482580a1b0accb60676063a1ac57b2d12daf6 (patch)
tree4c87b94da258dd28ff3e8da0c7efb5c8b6c5da68 /virt
parent4183683918efc3549b5ebddde4ed5edfdac45c17 (diff)
KVM: Call kvm_arch_memslots_updated() before updating memslots
kvm_arch_memslots_updated() is at this point in time an x86-specific hook for handling MMIO generation wraparound. x86 stashes 19 bits of the memslots generation number in its MMIO sptes in order to avoid full page fault walks for repeat faults on emulated MMIO addresses. Because only 19 bits are used, wrapping the MMIO generation number is possible, if unlikely. kvm_arch_memslots_updated() alerts x86 that the generation has changed so that it can invalidate all MMIO sptes in case the effective MMIO generation has wrapped so as to avoid using a stale spte, e.g. a (very) old spte that was created with generation==0.

Given that the purpose of kvm_arch_memslots_updated() is to prevent consuming stale entries, it needs to be called before the new generation is propagated to memslots. Invalidating the MMIO sptes after updating memslots means that there is a window where a vCPU could dereference the new memslots generation, e.g. 0, and incorrectly reuse an old MMIO spte that was created with (pre-wrap) generation==0.

Fixes: e59dbe09f8e6 ("KVM: Introduce kvm_arch_memslots_updated()")
Cc: <stable@vger.kernel.org>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/arm/mmu.c2
-rw-r--r--virt/kvm/kvm_main.c7
2 files changed, 6 insertions, 3 deletions
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..e0355e0f8712 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -2350,7 +2350,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2350 return 0; 2350 return 0;
2351} 2351}
2352 2352
2353void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) 2353void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2354{ 2354{
2355} 2355}
2356 2356
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0a0ea8f4bb1b..d54f6578a849 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -874,6 +874,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
874 int as_id, struct kvm_memslots *slots) 874 int as_id, struct kvm_memslots *slots)
875{ 875{
876 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 876 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
877 u64 gen;
877 878
878 /* 879 /*
879 * Set the low bit in the generation, which disables SPTE caching 880 * Set the low bit in the generation, which disables SPTE caching
@@ -896,9 +897,11 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
896 * space 0 will use generations 0, 4, 8, ... while address space 1 will 897 * space 0 will use generations 0, 4, 8, ... while address space 1 will
897 * use generations 2, 6, 10, 14, ... 898 * use generations 2, 6, 10, 14, ...
898 */ 899 */
899 slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1; 900 gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
900 901
901 kvm_arch_memslots_updated(kvm, slots); 902 kvm_arch_memslots_updated(kvm, gen);
903
904 slots->generation = gen;
902 905
903 return old_memslots; 906 return old_memslots;
904} 907}