aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>2013-05-30 20:36:22 -0400
committerGleb Natapov <gleb@redhat.com>2013-06-05 05:32:33 -0400
commit5304b8d37c2a5ebca48330f5e7868d240eafbed1 (patch)
treeca8df12267cf394c2cb871b7c3ebd742902a55fb /arch/x86/include
parenta2ae162265e88bf5490ce54fd5f2d430d6d992b7 (diff)
KVM: MMU: fast invalidate all pages
The current kvm_mmu_zap_all is really slow - it is holding mmu-lock to walk and zap all shadow pages one by one, and it also needs to zap all guest pages' rmaps and all shadow pages' parent spte lists. Particularly, things become worse if the guest uses more memory or vcpus. It is not good for scalability.

In this patch, we introduce a faster way to invalidate all shadow pages. KVM maintains a global mmu invalid generation-number which is stored in kvm->arch.mmu_valid_gen, and every shadow page stores the current global generation-number into sp->mmu_valid_gen when it is created.

When KVM needs to zap all shadow page sptes, it simply increases the global generation-number and then reloads the root shadow pages on all vcpus. Each vcpu will then create a new shadow page table according to the current generation-number. This ensures the old pages are not used any more. The obsolete pages (sp->mmu_valid_gen != kvm->arch.mmu_valid_gen) are then zapped by using a lock-break technique.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Diffstat (limited to 'arch/x86/include')
-rw-r--r--arch/x86/include/asm/kvm_host.h2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3741c653767c..bff7d464a6ae 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -222,6 +222,7 @@ struct kvm_mmu_page {
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
 	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */
+	unsigned long mmu_valid_gen;
 	DECLARE_BITMAP(unsync_child_bitmap, 512);

 #ifdef CONFIG_X86_32
@@ -529,6 +530,7 @@ struct kvm_arch {
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
+	unsigned long mmu_valid_gen;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.