| author | Avi Kivity <avi@qumranet.com> | 2007-01-05 19:36:51 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.osdl.org> | 2007-01-06 02:55:26 -0500 |
| commit | 3bb65a22a4502067f8cd3cb4c923ffa70be62091 (patch) | |
| tree | b8ee904c9a4b99f0c8f58e93b257f653222e3ba0 /drivers/kvm | |
| parent | 86a5ba025d0a0b251817d0efbeaf7037d4175d21 (diff) | |
[PATCH] KVM: MMU: Never free a shadow page actively serving as a root
We always need cr3 to point to something valid, so if we detect that we're
freeing a root page, simply push it back to the top of the active list.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
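
The mechanism behind the one-line struct change is a plain reference count: mmu_alloc_roots() bumps root_count on the shadow page backing each root, mmu_free_roots() drops it, and kvm_mmu_zap_page() frees a page only when the count is zero, otherwise re-inserting it at the head of the active list. The sketch below is a minimal user-space illustration of that idea, not the kernel code: shadow_page, push_front() and the singly linked list are simplified stand-ins for kvm_mmu_page, list_add() and kvm->active_mmu_pages.

```
/*
 * Minimal user-space sketch of the root_count idea in this patch.
 * shadow_page, push_front() and the singly linked list below are
 * simplified stand-ins, not the kernel's kvm_mmu_page / list_head API.
 */
#include <stdio.h>
#include <stdlib.h>

struct shadow_page {
	int root_count;           /* how many active roots point at this page */
	struct shadow_page *next; /* stand-in for the kvm->active_mmu_pages link */
};

/* Head of the "active" list; pages are pushed at the front. */
static struct shadow_page *active_mmu_pages;

static void push_front(struct shadow_page *page)
{
	page->next = active_mmu_pages;
	active_mmu_pages = page;
}

static void remove_from_list(struct shadow_page *page)
{
	struct shadow_page **pp = &active_mmu_pages;

	while (*pp && *pp != page)
		pp = &(*pp)->next;
	if (*pp)
		*pp = page->next;
}

/*
 * Loose analogue of kvm_mmu_zap_page() after this patch: a page that is
 * still serving as a root must not be freed, since cr3 has to keep
 * pointing at valid memory; it is pushed back to the top of the active
 * list instead, to be reconsidered later.
 */
static void zap_page(struct shadow_page *page)
{
	remove_from_list(page);
	if (!page->root_count)
		free(page);        /* nothing references it as a root */
	else
		push_front(page);  /* keep it alive at the head of the list */
}

int main(void)
{
	struct shadow_page *root = calloc(1, sizeof(*root));
	struct shadow_page *leaf = calloc(1, sizeof(*leaf));

	if (!root || !leaf)
		return 1;

	push_front(leaf);
	push_front(root);
	root->root_count = 1;      /* mmu_alloc_roots() does ++root_count */

	zap_page(leaf);            /* freed outright */
	zap_page(root);            /* survives: still an active root */
	printf("root kept alive: %s\n",
	       active_mmu_pages == root ? "yes" : "no");

	root->root_count = 0;      /* mmu_free_roots() does --root_count */
	zap_page(root);            /* now it can really be freed */
	return 0;
}
```

Re-inserting at the list head instead of freeing is what keeps cr3 pointing at valid memory: the page survives until mmu_free_roots() drops the last reference, after which a later zap can release it for real.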
Diffstat (limited to 'drivers/kvm')
-rw-r--r--  drivers/kvm/kvm.h |  1
-rw-r--r--  drivers/kvm/mmu.c | 20
2 files changed, 19 insertions, 2 deletions
```
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 201b2735ca91..b24a86e1f434 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -134,6 +134,7 @@ struct kvm_mmu_page {
 	 */
 	int global;	/* Set if all ptes in this page are global */
 	int multimapped;	/* More than one parent_pte? */
+	int root_count;		/* Currently serving as active root */
 	union {
 		u64 *parent_pte;	/* !multimapped */
 		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 0e44aca9eee7..f16321498093 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -550,8 +550,13 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
 		*parent_pte = 0;
 	}
 	kvm_mmu_page_unlink_children(vcpu, page);
-	hlist_del(&page->hash_link);
-	kvm_mmu_free_page(vcpu, page->page_hpa);
+	if (!page->root_count) {
+		hlist_del(&page->hash_link);
+		kvm_mmu_free_page(vcpu, page->page_hpa);
+	} else {
+		list_del(&page->link);
+		list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+	}
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -667,12 +672,15 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
+	struct kvm_mmu_page *page;
 
 #ifdef CONFIG_X86_64
 	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->mmu.root_hpa;
 
 		ASSERT(VALID_PAGE(root));
+		page = page_header(root);
+		--page->root_count;
 		vcpu->mmu.root_hpa = INVALID_PAGE;
 		return;
 	}
@@ -682,6 +690,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(VALID_PAGE(root));
 		root &= PT64_BASE_ADDR_MASK;
+		page = page_header(root);
+		--page->root_count;
 		vcpu->mmu.pae_root[i] = INVALID_PAGE;
 	}
 	vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -691,6 +701,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
 	gfn_t root_gfn;
+	struct kvm_mmu_page *page;
+
 	root_gfn = vcpu->cr3 >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
@@ -700,6 +712,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 		root = kvm_mmu_get_page(vcpu, root_gfn, 0,
 					PT64_ROOT_LEVEL, 0, NULL)->page_hpa;
+		page = page_header(root);
+		++page->root_count;
 		vcpu->mmu.root_hpa = root;
 		return;
 	}
@@ -715,6 +729,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		root = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 					PT32_ROOT_LEVEL, !is_paging(vcpu),
 					NULL)->page_hpa;
+		page = page_header(root);
+		++page->root_count;
 		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
 	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
```