author     Avi Kivity <avi@qumranet.com>          2007-01-05 19:36:47 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2007-01-06 02:55:25 -0500
commit     ebeace8609205bf5e1b96fe325b7dea148042232 (patch)
tree       863991639d7df2a4aa5fd8b20b6b3a28c2cc2cda /drivers/kvm/mmu.c
parent     cc4529efc7b730b596d9c7d5a917c00a357e92aa (diff)
[PATCH] KVM: MMU: oom handling
When beginning to process a page fault, make sure we have enough shadow pages
available to service the fault.  If not, free some pages.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
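[Editor's note: the following sketch is illustrative, not part of the commit.]
The scheme the patch introduces: n_free_mmu_pages goes up whenever a shadow
page returns to the free list and down whenever one is allocated, and the new
kvm_mmu_free_some_pages() zaps pages from the cold end of active_mmu_pages
until at least KVM_REFILL_PAGES are free again.  The toy program below models
only that accounting, under simplified assumptions: plain counters stand in
for the kernel's list_head lists, and calling the refill helper at fault
entry is implied by the changelog rather than shown in this file's diff.

/* Toy model of the refill accounting -- not the kernel code itself. */
#include <stdio.h>

#define NPAGES       8  /* stand-in for KVM_NUM_MMU_PAGES  */
#define REFILL_PAGES 4  /* stand-in for KVM_REFILL_PAGES   */

static int n_free_mmu_pages;  /* the counter this patch adds    */
static int n_active_pages;    /* pages on the active (LRU) list */

/* models kvm_mmu_alloc_page(): consume a page from the free pool */
static void alloc_page(void)
{
	--n_free_mmu_pages;
	++n_active_pages;
}

/* models kvm_mmu_zap_page() -> kvm_mmu_free_page(): recycle one page */
static void zap_lru_page(void)
{
	--n_active_pages;
	++n_free_mmu_pages;
}

/* models kvm_mmu_free_some_pages(): top up the pool before a fault,
 * so the fault path can never run the free list dry mid-walk */
static void free_some_pages(void)
{
	while (n_free_mmu_pages < REFILL_PAGES)
		zap_lru_page();
}

int main(void)
{
	n_free_mmu_pages = NPAGES;     /* alloc_mmu_pages() fills the pool */
	for (int i = 0; i < 7; i++) {  /* each fault consumes shadow pages */
		free_some_pages();
		alloc_page();
		printf("fault %d: free=%d active=%d\n",
		       i, n_free_mmu_pages, n_active_pages);
	}
	return 0;
}

Note that the real helper evicts from active_mmu_pages.prev, i.e. the list
tail; assuming allocations list_add() at the head (not shown in this diff),
the tail holds the oldest shadow pages, so the loop approximates LRU eviction.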
Diffstat (limited to 'drivers/kvm/mmu.c')
 drivers/kvm/mmu.c | 49 +++++++++++++++++++++----------------------------
 1 file changed, 21 insertions(+), 28 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index d788866d5a6f..e4a20a45d834 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -310,6 +310,7 @@ static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
 	list_del(&page_head->link);
 	page_head->page_hpa = page_hpa;
 	list_add(&page_head->link, &vcpu->free_pages);
+	++vcpu->kvm->n_free_mmu_pages;
 }
 
 static int is_empty_shadow_page(hpa_t page_hpa)
@@ -344,6 +345,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	page->global = 1;
 	page->multimapped = 0;
 	page->parent_pte = parent_pte;
+	--vcpu->kvm->n_free_mmu_pages;
 	return page;
 }
 
@@ -544,8 +546,7 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
 	}
 	kvm_mmu_page_unlink_children(vcpu, page);
 	hlist_del(&page->hash_link);
-	list_del(&page->link);
-	list_add(&page->link, &vcpu->free_pages);
+	kvm_mmu_free_page(vcpu, page->page_hpa);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -743,18 +744,6 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
 }
 
-static void nonpaging_flush(struct kvm_vcpu *vcpu)
-{
-	hpa_t root = vcpu->mmu.root_hpa;
-
-	++kvm_stat.tlb_flush;
-	pgprintk("nonpaging_flush\n");
-	mmu_free_roots(vcpu);
-	mmu_alloc_roots(vcpu);
-	kvm_arch_ops->set_cr3(vcpu, root);
-	kvm_arch_ops->tlb_flush(vcpu);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	return vaddr;
@@ -763,28 +752,19 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 			       u32 error_code)
 {
-	int ret;
 	gpa_t addr = gva;
+	hpa_t paddr;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
-	for (;;) {
-		hpa_t paddr;
 
-		paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
+	paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
 
-		if (is_error_hpa(paddr))
-			return 1;
+	if (is_error_hpa(paddr))
+		return 1;
 
-		ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
-		if (ret) {
-			nonpaging_flush(vcpu);
-			continue;
-		}
-		break;
-	}
-	return ret;
+	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
 }
 
 static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
@@ -1093,6 +1073,18 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
 }
 
+void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+{
+	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+		struct kvm_mmu_page *page;
+
+		page = container_of(vcpu->kvm->active_mmu_pages.prev,
+				    struct kvm_mmu_page, link);
+		kvm_mmu_zap_page(vcpu, page);
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	while (!list_empty(&vcpu->free_pages)) {
@@ -1124,6 +1116,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
 		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
 		list_add(&page_header->link, &vcpu->free_pages);
+		++vcpu->kvm->n_free_mmu_pages;
 	}
 
 	/*