summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author: Sean Christopherson <sean.j.christopherson@intel.com> 2019-02-05 15:54:17 -0500
committer: Paolo Bonzini <pbonzini@redhat.com> 2019-02-20 16:48:32 -0500
commit: 152482580a1b0accb60676063a1ac57b2d12daf6 (patch)
tree: 4c87b94da258dd28ff3e8da0c7efb5c8b6c5da68
parent: 4183683918efc3549b5ebddde4ed5edfdac45c17 (diff)
KVM: Call kvm_arch_memslots_updated() before updating memslots
kvm_arch_memslots_updated() is at this point in time an x86-specific hook for handling MMIO generation wraparound. x86 stashes 19 bits of the memslots generation number in its MMIO sptes in order to avoid full page fault walks for repeat faults on emulated MMIO addresses. Because only 19 bits are used, wrapping the MMIO generation number is possible, if unlikely. kvm_arch_memslots_updated() alerts x86 that the generation has changed so that it can invalidate all MMIO sptes in case the effective MMIO generation has wrapped so as to avoid using a stale spte, e.g. a (very) old spte that was created with generation==0.

Given that the purpose of kvm_arch_memslots_updated() is to prevent consuming stale entries, it needs to be called before the new generation is propagated to memslots. Invalidating the MMIO sptes after updating memslots means that there is a window where a vCPU could dereference the new memslots generation, e.g. 0, and incorrectly reuse an old MMIO spte that was created with (pre-wrap) generation==0.

Fixes: e59dbe09f8e6 ("KVM: Introduce kvm_arch_memslots_updated()")
Cc: <stable@vger.kernel.org>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r-- arch/mips/include/asm/kvm_host.h    | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 2
-rw-r--r-- arch/s390/include/asm/kvm_host.h    | 2
-rw-r--r-- arch/x86/include/asm/kvm_host.h     | 2
-rw-r--r-- arch/x86/kvm/mmu.c                  | 4
-rw-r--r-- arch/x86/kvm/x86.c                  | 4
-rw-r--r-- include/linux/kvm_host.h            | 2
-rw-r--r-- virt/kvm/arm/mmu.c                  | 2
-rw-r--r-- virt/kvm/kvm_main.c                 | 7
9 files changed, 15 insertions, 12 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index d2abd98471e8..41204a49cf95 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -1134,7 +1134,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
1134static inline void kvm_arch_sync_events(struct kvm *kvm) {} 1134static inline void kvm_arch_sync_events(struct kvm *kvm) {}
1135static inline void kvm_arch_free_memslot(struct kvm *kvm, 1135static inline void kvm_arch_free_memslot(struct kvm *kvm,
1136 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} 1136 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
1137static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} 1137static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
1138static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} 1138static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
1139static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} 1139static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
1140static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} 1140static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0f98f00da2ea..19693b8add93 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -837,7 +837,7 @@ struct kvm_vcpu_arch {
837static inline void kvm_arch_hardware_disable(void) {} 837static inline void kvm_arch_hardware_disable(void) {}
838static inline void kvm_arch_hardware_unsetup(void) {} 838static inline void kvm_arch_hardware_unsetup(void) {}
839static inline void kvm_arch_sync_events(struct kvm *kvm) {} 839static inline void kvm_arch_sync_events(struct kvm *kvm) {}
840static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} 840static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
841static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} 841static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
842static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} 842static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
843static inline void kvm_arch_exit(void) {} 843static inline void kvm_arch_exit(void) {}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d5d24889c3bc..c2b8c8c6c9be 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -878,7 +878,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
878static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} 878static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
879static inline void kvm_arch_free_memslot(struct kvm *kvm, 879static inline void kvm_arch_free_memslot(struct kvm *kvm,
880 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} 880 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
881static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} 881static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
882static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} 882static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
883static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 883static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
884 struct kvm_memory_slot *slot) {} 884 struct kvm_memory_slot *slot) {}
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0e2ef41efb9d..c4758e1a8843 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1254,7 +1254,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1254 struct kvm_memory_slot *slot, 1254 struct kvm_memory_slot *slot,
1255 gfn_t gfn_offset, unsigned long mask); 1255 gfn_t gfn_offset, unsigned long mask);
1256void kvm_mmu_zap_all(struct kvm *kvm); 1256void kvm_mmu_zap_all(struct kvm *kvm);
1257void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots); 1257void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
1258unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 1258unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
1259void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 1259void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
1260 1260
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 415d0e62cb3e..a53a0e7ad9e6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5893,13 +5893,13 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5893 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); 5893 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5894} 5894}
5895 5895
5896void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots) 5896void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5897{ 5897{
5898 /* 5898 /*
5899 * The very rare case: if the generation-number is round, 5899 * The very rare case: if the generation-number is round,
5900 * zap all shadow pages. 5900 * zap all shadow pages.
5901 */ 5901 */
5902 if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) { 5902 if (unlikely((gen & MMIO_GEN_MASK) == 0)) {
5903 kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); 5903 kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5904 kvm_mmu_invalidate_zap_all_pages(kvm); 5904 kvm_mmu_invalidate_zap_all_pages(kvm);
5905 } 5905 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3de586f89730..03d26ffb29cd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9357,13 +9357,13 @@ out_free:
9357 return -ENOMEM; 9357 return -ENOMEM;
9358} 9358}
9359 9359
9360void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) 9360void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
9361{ 9361{
9362 /* 9362 /*
9363 * memslots->generation has been incremented. 9363 * memslots->generation has been incremented.
9364 * mmio generation may have reached its maximum value. 9364 * mmio generation may have reached its maximum value.
9365 */ 9365 */
9366 kvm_mmu_invalidate_mmio_sptes(kvm, slots); 9366 kvm_mmu_invalidate_mmio_sptes(kvm, gen);
9367} 9367}
9368 9368
9369int kvm_arch_prepare_memory_region(struct kvm *kvm, 9369int kvm_arch_prepare_memory_region(struct kvm *kvm,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c38cc5eb7e73..cf761ff58224 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -634,7 +634,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
634 struct kvm_memory_slot *dont); 634 struct kvm_memory_slot *dont);
635int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 635int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
636 unsigned long npages); 636 unsigned long npages);
637void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots); 637void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
638int kvm_arch_prepare_memory_region(struct kvm *kvm, 638int kvm_arch_prepare_memory_region(struct kvm *kvm,
639 struct kvm_memory_slot *memslot, 639 struct kvm_memory_slot *memslot,
640 const struct kvm_userspace_memory_region *mem, 640 const struct kvm_userspace_memory_region *mem,
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..e0355e0f8712 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -2350,7 +2350,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2350 return 0; 2350 return 0;
2351} 2351}
2352 2352
2353void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) 2353void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2354{ 2354{
2355} 2355}
2356 2356
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0a0ea8f4bb1b..d54f6578a849 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -874,6 +874,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
874 int as_id, struct kvm_memslots *slots) 874 int as_id, struct kvm_memslots *slots)
875{ 875{
876 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 876 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
877 u64 gen;
877 878
878 /* 879 /*
879 * Set the low bit in the generation, which disables SPTE caching 880 * Set the low bit in the generation, which disables SPTE caching
@@ -896,9 +897,11 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
896 * space 0 will use generations 0, 4, 8, ... while * address space 1 will 897 * space 0 will use generations 0, 4, 8, ... while * address space 1 will
897 * use generations 2, 6, 10, 14, ... 898 * use generations 2, 6, 10, 14, ...
898 */ 899 */
899 slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1; 900 gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
900 901
901 kvm_arch_memslots_updated(kvm, slots); 902 kvm_arch_memslots_updated(kvm, gen);
903
904 slots->generation = gen;
902 905
903 return old_memslots; 906 return old_memslots;
904} 907}