Diffstat (limited to 'arch/x86')
 arch/x86/kvm/mmu.c | 33 ++++++++++++++++++++++++++++++---
 1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fe9d6f10e7a9..674c0442ac89 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1654,6 +1654,16 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                     struct list_head *invalid_list);
 
+/*
+ * NOTE: take care with zapped-obsolete pages
+ * (is_obsolete_sp(sp) && sp->role.invalid) when doing a hash list walk:
+ * they have been deleted from active_mmu_pages but can still be found
+ * on the hash list.
+ *
+ * for_each_gfn_indirect_valid_sp skips that kind of page, and
+ * kvm_mmu_get_page(), the only user of for_each_gfn_sp(), skips
+ * all obsolete pages.
+ */
 #define for_each_gfn_sp(_kvm, _sp, _gfn)                                \
         hlist_for_each_entry(_sp,                                       \
           &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
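For context, the skip that the NOTE above relies on: for_each_gfn_indirect_valid_sp() filters the hash-bucket walk, and is_obsolete_sp() is the generation test used throughout mmu.c. A minimal sketch, assuming mmu.c context; the exact upstream macro body may differ slightly:

        /* Sketch: walk the gfn hash bucket, skipping direct, invalid and
         * generation-obsolete shadow pages. */
        #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)         \
                for_each_gfn_sp(_kvm, _sp, _gfn)                        \
                        if ((_sp)->role.direct || (_sp)->role.invalid ||\
                            is_obsolete_sp(_kvm, _sp)) {} else

        /* A page is obsolete when its generation no longer matches the
         * VM's, i.e. it was created before the last invalidation. */
        static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
        {
                return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
        }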
@@ -4224,11 +4234,13 @@ restart:
                 if (sp->role.invalid)
                         continue;
 
+                /*
+                 * No need to flush the TLB, since we only zap shadow
+                 * pages with an invalid generation number.
+                 */
                 if (batch >= BATCH_ZAP_PAGES &&
-                      (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
+                      cond_resched_lock(&kvm->mmu_lock)) {
                         batch = 0;
-                        kvm_mmu_commit_zap_page(kvm, &invalid_list);
-                        cond_resched_lock(&kvm->mmu_lock);
                         goto restart;
                 }
 
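The open-coded need_resched()/spin_needbreak() test is dropped above because cond_resched_lock() performs the same check internally and returns nonzero when it dropped the lock. Roughly, as a sketch rather than the kernel's exact implementation:

        #include <linux/sched.h>        /* need_resched(), cond_resched() */
        #include <linux/spinlock.h>     /* spinlock_t, spin_needbreak() */

        /* Sketch of cond_resched_lock(lock): yield if a reschedule is due
         * or the lock is contended; return nonzero if we gave up the lock. */
        static int cond_resched_lock_sketch(spinlock_t *lock)
        {
                if (need_resched() || spin_needbreak(lock)) {
                        spin_unlock(lock);
                        cond_resched();
                        spin_lock(lock);
                        return 1;
                }
                return 0;
        }

Since every page zapped here carries an invalid generation, the pending invalid_list no longer has to be committed (and TLBs flushed) before yielding, which is why the kvm_mmu_commit_zap_page() call could move out of the resched path.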
@@ -4239,6 +4251,10 @@ restart:
                         goto restart;
                 }
 
+        /*
+         * The TLB must be flushed before the page tables are freed,
+         * since lockless walkers may still be using the pages.
+         */
         kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
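The ordering demanded by the new comment is what kvm_mmu_commit_zap_page() provides: the remote TLB flush doubles as the wait for lockless walkers, and only afterwards are the pages returned to the allocator. A condensed sketch, assuming mmu.c context and omitting barriers and sanity checks:

        /* Condensed sketch of kvm_mmu_commit_zap_page(): flush first so no
         * vcpu or lockless walker can still reach the pages, then free. */
        static void commit_zap_sketch(struct kvm *kvm,
                                      struct list_head *invalid_list)
        {
                struct kvm_mmu_page *sp, *nsp;

                if (list_empty(invalid_list))
                        return;

                kvm_flush_remote_tlbs(kvm);     /* waits out lockless walkers */

                list_for_each_entry_safe(sp, nsp, invalid_list, link)
                        kvm_mmu_free_page(sp);  /* now safe to free */
        }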
@@ -4257,6 +4273,17 @@ void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
         trace_kvm_mmu_invalidate_zap_all_pages(kvm);
         kvm->arch.mmu_valid_gen++;
 
+        /*
+         * Notify all vcpus to reload their shadow page tables and
+         * flush their TLBs. They will then switch to new shadow page
+         * tables carrying the new mmu_valid_gen.
+         *
+         * Note: this must be done under the protection of mmu_lock;
+         * otherwise a vcpu could purge a shadow page but miss the
+         * TLB flush.
+         */
+        kvm_reload_remote_mmus(kvm);
+
         kvm_zap_obsolete_pages(kvm);
         spin_unlock(&kvm->mmu_lock);
 }
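Reassembled from the hunks above, the tail of the invalidation path now reads as follows; the spin_lock() at the top sits outside the visible diff context but is implied by the unlock:

        void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
        {
                spin_lock(&kvm->mmu_lock);
                trace_kvm_mmu_invalidate_zap_all_pages(kvm);
                kvm->arch.mmu_valid_gen++;

                /* Kick vcpus onto fresh roots first, while mmu_lock is held,
                 * so a vcpu cannot zap a shadow page yet miss the TLB flush. */
                kvm_reload_remote_mmus(kvm);

                kvm_zap_obsolete_pages(kvm);
                spin_unlock(&kvm->mmu_lock);
        }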