diff options
author | Jérôme Glisse <jglisse@redhat.com> | 2017-08-31 17:17:37 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-08-31 19:13:00 -0400 |
commit | fb1522e099f0c69f36655af233a64e3f55941f5b (patch) | |
tree | fa8f0545ef52dadf403060a0a9a61dee5860af84 | |
parent | a81461b0546cbf57128f34e3dd6013bcdbdf0371 (diff) |
KVM: update to new mmu_notifier semantic v2
Calls to mmu_notifier_invalidate_page() were replaced by calls to
mmu_notifier_invalidate_range() and are now bracketed by calls to
mmu_notifier_invalidate_range_start()/end().
Remove now useless invalidate_page callback.
Changed since v1 (Linus Torvalds):
- remove now useless kvm_arch_mmu_notifier_invalidate_page()
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Tested-by: Mike Galbraith <efault@gmx.de>
Tested-by: Adam Borowski <kilobyte@angband.pl>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: kvm@vger.kernel.org
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | arch/arm/include/asm/kvm_host.h | 6 | ||||
-rw-r--r-- | arch/arm64/include/asm/kvm_host.h | 6 | ||||
-rw-r--r-- | arch/mips/include/asm/kvm_host.h | 5 | ||||
-rw-r--r-- | arch/powerpc/include/asm/kvm_host.h | 5 | ||||
-rw-r--r-- | arch/x86/include/asm/kvm_host.h | 2 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 11 | ||||
-rw-r--r-- | virt/kvm/kvm_main.c | 42 |
7 files changed, 0 insertions, 77 deletions
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 127e2dd2e21c..4a879f6ff13b 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -225,12 +225,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | |||
225 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | 225 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); |
226 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | 226 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); |
227 | 227 | ||
228 | /* We do not have shadow page tables, hence the empty hooks */ | ||
229 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | ||
230 | unsigned long address) | ||
231 | { | ||
232 | } | ||
233 | |||
234 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); | 228 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); |
235 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); | 229 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); |
236 | void kvm_arm_halt_guest(struct kvm *kvm); | 230 | void kvm_arm_halt_guest(struct kvm *kvm); |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d68630007b14..e923b58606e2 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -326,12 +326,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | |||
326 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | 326 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); |
327 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | 327 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); |
328 | 328 | ||
329 | /* We do not have shadow page tables, hence the empty hooks */ | ||
330 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | ||
331 | unsigned long address) | ||
332 | { | ||
333 | } | ||
334 | |||
335 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); | 329 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); |
336 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); | 330 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); |
337 | void kvm_arm_halt_guest(struct kvm *kvm); | 331 | void kvm_arm_halt_guest(struct kvm *kvm); |
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 2998479fd4e8..a9af1d2dcd69 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
@@ -938,11 +938,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | |||
938 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | 938 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); |
939 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | 939 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); |
940 | 940 | ||
941 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | ||
942 | unsigned long address) | ||
943 | { | ||
944 | } | ||
945 | |||
946 | /* Emulation */ | 941 | /* Emulation */ |
947 | int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); | 942 | int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); |
948 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); | 943 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); |
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 8b3f1238d07f..e372ed871c51 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h | |||
@@ -67,11 +67,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | |||
67 | extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | 67 | extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); |
68 | extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 68 | extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
69 | 69 | ||
70 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | ||
71 | unsigned long address) | ||
72 | { | ||
73 | } | ||
74 | |||
75 | #define HPTEG_CACHE_NUM (1 << 15) | 70 | #define HPTEG_CACHE_NUM (1 << 15) |
76 | #define HPTEG_HASH_BITS_PTE 13 | 71 | #define HPTEG_HASH_BITS_PTE 13 |
77 | #define HPTEG_HASH_BITS_PTE_LONG 12 | 72 | #define HPTEG_HASH_BITS_PTE_LONG 12 |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f4d120a3e22e..92c9032502d8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -1375,8 +1375,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | |||
1375 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); | 1375 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
1376 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); | 1376 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); |
1377 | void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); | 1377 | void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); |
1378 | void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | ||
1379 | unsigned long address); | ||
1380 | 1378 | ||
1381 | void kvm_define_shared_msr(unsigned index, u32 msr); | 1379 | void kvm_define_shared_msr(unsigned index, u32 msr); |
1382 | int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); | 1380 | int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 05a5e57c6f39..272320eb328c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -6734,17 +6734,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) | |||
6734 | } | 6734 | } |
6735 | EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); | 6735 | EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); |
6736 | 6736 | ||
6737 | void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | ||
6738 | unsigned long address) | ||
6739 | { | ||
6740 | /* | ||
6741 | * The physical address of apic access page is stored in the VMCS. | ||
6742 | * Update it when it becomes invalid. | ||
6743 | */ | ||
6744 | if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT)) | ||
6745 | kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); | ||
6746 | } | ||
6747 | |||
6748 | /* | 6737 | /* |
6749 | * Returns 1 to let vcpu_run() continue the guest execution loop without | 6738 | * Returns 1 to let vcpu_run() continue the guest execution loop without |
6750 | * exiting to the userspace. Otherwise, the value will be returned to the | 6739 | * exiting to the userspace. Otherwise, the value will be returned to the |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 15252d723b54..4d81f6ded88e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -322,47 +322,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) | |||
322 | return container_of(mn, struct kvm, mmu_notifier); | 322 | return container_of(mn, struct kvm, mmu_notifier); |
323 | } | 323 | } |
324 | 324 | ||
325 | static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, | ||
326 | struct mm_struct *mm, | ||
327 | unsigned long address) | ||
328 | { | ||
329 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | ||
330 | int need_tlb_flush, idx; | ||
331 | |||
332 | /* | ||
333 | * When ->invalidate_page runs, the linux pte has been zapped | ||
334 | * already but the page is still allocated until | ||
335 | * ->invalidate_page returns. So if we increase the sequence | ||
336 | * here the kvm page fault will notice if the spte can't be | ||
337 | * established because the page is going to be freed. If | ||
338 | * instead the kvm page fault establishes the spte before | ||
339 | * ->invalidate_page runs, kvm_unmap_hva will release it | ||
340 | * before returning. | ||
341 | * | ||
342 | * The sequence increase only need to be seen at spin_unlock | ||
343 | * time, and not at spin_lock time. | ||
344 | * | ||
345 | * Increasing the sequence after the spin_unlock would be | ||
346 | * unsafe because the kvm page fault could then establish the | ||
347 | * pte after kvm_unmap_hva returned, without noticing the page | ||
348 | * is going to be freed. | ||
349 | */ | ||
350 | idx = srcu_read_lock(&kvm->srcu); | ||
351 | spin_lock(&kvm->mmu_lock); | ||
352 | |||
353 | kvm->mmu_notifier_seq++; | ||
354 | need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; | ||
355 | /* we've to flush the tlb before the pages can be freed */ | ||
356 | if (need_tlb_flush) | ||
357 | kvm_flush_remote_tlbs(kvm); | ||
358 | |||
359 | spin_unlock(&kvm->mmu_lock); | ||
360 | |||
361 | kvm_arch_mmu_notifier_invalidate_page(kvm, address); | ||
362 | |||
363 | srcu_read_unlock(&kvm->srcu, idx); | ||
364 | } | ||
365 | |||
366 | static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, | 325 | static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, |
367 | struct mm_struct *mm, | 326 | struct mm_struct *mm, |
368 | unsigned long address, | 327 | unsigned long address, |
@@ -510,7 +469,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, | |||
510 | } | 469 | } |
511 | 470 | ||
512 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | 471 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { |
513 | .invalidate_page = kvm_mmu_notifier_invalidate_page, | ||
514 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, | 472 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, |
515 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, | 473 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, |
516 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, | 474 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, |