 drivers/kvm/kvm.h         | 12
 drivers/kvm/mmu.c         | 49
 drivers/kvm/paging_tmpl.h | 15
 drivers/kvm/svm.c         |  2
 drivers/kvm/vmx.c         |  2
 5 files changed, 37 insertions(+), 43 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 34c43bb4d348..1d0be85651f5 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -52,6 +52,8 @@
 #define KVM_MAX_VCPUS 1
 #define KVM_MEMORY_SLOTS 4
 #define KVM_NUM_MMU_PAGES 256
+#define KVM_MIN_FREE_MMU_PAGES 5
+#define KVM_REFILL_PAGES 25
 
 #define FX_IMAGE_SIZE 512
 #define FX_IMAGE_ALIGN 16
@@ -278,6 +280,7 @@ struct kvm {
 	 * Hash table of struct kvm_mmu_page.
 	 */
 	struct list_head active_mmu_pages;
+	int n_free_mmu_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
 	int memory_config_version;
@@ -451,6 +454,15 @@ unsigned long segment_base(u16 selector);
 void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
 void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
+void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+
+static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+				     u32 error_code)
+{
+	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+		kvm_mmu_free_some_pages(vcpu);
+	return vcpu->mmu.page_fault(vcpu, gva, error_code);
+}
 
 static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
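The new kvm_mmu_page_fault() wrapper is the heart of this patch: before dispatching to the mode-specific handler it checks the shared pool of free shadow pages against a low watermark (KVM_MIN_FREE_MMU_PAGES) and reclaims if the pool has run low. Refilling up to KVM_REFILL_PAGES rather than just past the trigger gives the reclaim hysteresis, so it runs in batches instead of on every fault. Below is a minimal user-space sketch of that watermark-then-dispatch pattern; struct pool, handle_fault() and consume_one() are illustrative stand-ins, not KVM code.

	#include <stdio.h>

	/* Illustrative thresholds mirroring KVM_MIN_FREE_MMU_PAGES and
	 * KVM_REFILL_PAGES; the values match the patch, the rest is a sketch. */
	#define LOW_WATER	5
	#define REFILL_TO	25

	struct pool {
		int n_free;			/* free objects currently available */
		int (*handle)(struct pool *p);	/* mode-specific handler */
	};

	static void free_some(struct pool *p)
	{
		/* Reclaim until comfortably above the low watermark. */
		while (p->n_free < REFILL_TO)
			++p->n_free;	/* stands in for zapping an active page */
	}

	/* Top up the pool before dispatching, as kvm_mmu_page_fault() does. */
	static int handle_fault(struct pool *p)
	{
		if (p->n_free < LOW_WATER)
			free_some(p);
		return p->handle(p);
	}

	static int consume_one(struct pool *p)
	{
		--p->n_free;	/* the handler may allocate from the pool */
		return 0;
	}

	int main(void)
	{
		struct pool p = { .n_free = 3, .handle = consume_one };

		handle_fault(&p);
		printf("free after fault: %d\n", p.n_free);	/* prints 24 */
		return 0;
	}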
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index d788866d5a6f..e4a20a45d834 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -310,6 +310,7 @@ static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
 	list_del(&page_head->link);
 	page_head->page_hpa = page_hpa;
 	list_add(&page_head->link, &vcpu->free_pages);
+	++vcpu->kvm->n_free_mmu_pages;
 }
 
 static int is_empty_shadow_page(hpa_t page_hpa)
@@ -344,6 +345,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	page->global = 1;
 	page->multimapped = 0;
 	page->parent_pte = parent_pte;
+	--vcpu->kvm->n_free_mmu_pages;
 	return page;
 }
 
@@ -544,8 +546,7 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
 	}
 	kvm_mmu_page_unlink_children(vcpu, page);
 	hlist_del(&page->hash_link);
-	list_del(&page->link);
-	list_add(&page->link, &vcpu->free_pages);
+	kvm_mmu_free_page(vcpu, page->page_hpa);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -743,18 +744,6 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
 }
 
-static void nonpaging_flush(struct kvm_vcpu *vcpu)
-{
-	hpa_t root = vcpu->mmu.root_hpa;
-
-	++kvm_stat.tlb_flush;
-	pgprintk("nonpaging_flush\n");
-	mmu_free_roots(vcpu);
-	mmu_alloc_roots(vcpu);
-	kvm_arch_ops->set_cr3(vcpu, root);
-	kvm_arch_ops->tlb_flush(vcpu);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	return vaddr;
@@ -763,28 +752,19 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 			       u32 error_code)
 {
-	int ret;
 	gpa_t addr = gva;
+	hpa_t paddr;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
-	for (;;) {
-		hpa_t paddr;
 
-		paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
+	paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
 
-		if (is_error_hpa(paddr))
-			return 1;
+	if (is_error_hpa(paddr))
+		return 1;
 
-		ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
-		if (ret) {
-			nonpaging_flush(vcpu);
-			continue;
-		}
-		break;
-	}
-	return ret;
+	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
 }
 
 static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
@@ -1093,6 +1073,18 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
 }
 
+void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+{
+	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+		struct kvm_mmu_page *page;
+
+		page = container_of(vcpu->kvm->active_mmu_pages.prev,
+				    struct kvm_mmu_page, link);
+		kvm_mmu_zap_page(vcpu, page);
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	while (!list_empty(&vcpu->free_pages)) {
@@ -1124,6 +1116,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
 		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
 		list_add(&page_header->link, &vcpu->free_pages);
+		++vcpu->kvm->n_free_mmu_pages;
 	}
 
 	/*
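kvm_mmu_free_some_pages() above reclaims from the tail of active_mmu_pages, recovering each struct kvm_mmu_page from its embedded list node with container_of() and zapping it until the free count reaches KVM_REFILL_PAGES. A self-contained sketch of that intrusive-list reclaim idiom follows, assuming a simplified doubly-linked list rather than the kernel's <linux/list.h>:

	#include <stddef.h>
	#include <stdio.h>

	/* Recover the containing struct from a pointer to its list member. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct list_head { struct list_head *next, *prev; };

	static void list_init(struct list_head *h) { h->next = h->prev = h; }

	static void list_add(struct list_head *n, struct list_head *h)
	{
		n->next = h->next; n->prev = h;
		h->next->prev = n; h->next = n;
	}

	static void list_del(struct list_head *n)
	{
		n->prev->next = n->next; n->next->prev = n->prev;
		n->next = n->prev = NULL;
	}

	struct shadow_page {
		int id;
		struct list_head link;	/* threads the page onto the active list */
	};

	int main(void)
	{
		struct list_head active;
		struct shadow_page pages[4];
		int i, n_free = 0;

		list_init(&active);
		for (i = 0; i < 4; i++) {
			pages[i].id = i;
			/* list_add() prepends, so pages[0] ends up at the tail. */
			list_add(&pages[i].link, &active);
		}

		/* Zap from the tail until we hold enough free pages, as
		 * kvm_mmu_free_some_pages() does with KVM_REFILL_PAGES. */
		while (n_free < 2) {
			struct shadow_page *victim =
				container_of(active.prev, struct shadow_page, link);

			list_del(&victim->link);
			++n_free;	/* stands in for kvm_mmu_zap_page() */
			printf("zapped page %d\n", victim->id);	/* 0, then 1 */
		}
		return 0;
	}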
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index cf4b74cc75b5..03c474aaedde 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -246,8 +246,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		}
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 					       metaphysical, shadow_ent);
-		if (!shadow_page)
-			return ERR_PTR(-ENOMEM);
 		shadow_addr = shadow_page->page_hpa;
 		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
 			| PT_WRITABLE_MASK | PT_USER_MASK;
@@ -347,17 +345,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
-	for (;;) {
-		FNAME(walk_addr)(&walker, vcpu, addr);
-		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
-		if (IS_ERR(shadow_pte)) { /* must be -ENOMEM */
-			printk("%s: oom\n", __FUNCTION__);
-			nonpaging_flush(vcpu);
-			FNAME(release_walker)(&walker);
-			continue;
-		}
-		break;
-	}
+	FNAME(walk_addr)(&walker, vcpu, addr);
+	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
 
 	/*
 	 * The page is not mapped by the guest. Let the guest handle it.
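The deleted retry loop leaned on the kernel's ERR_PTR()/IS_ERR() convention to smuggle -ENOMEM back through the shadow-pte pointer; with shadow pages now reclaimed up front in kvm_mmu_page_fault(), FNAME(fetch) can no longer run out, so both the error encoding and the nonpaging_flush() retry disappear. For reference, a user-space sketch of that pointer-encoding idiom (simplified from the kernel's err.h; the toy fetch() is hypothetical):

	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095	/* same bound the kernel uses */

	/* Encode a small negative errno in an invalid pointer value. */
	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *fetch(int oom)	/* toy stand-in for FNAME(fetch) */
	{
		static int value = 42;
		return oom ? ERR_PTR(-ENOMEM) : &value;
	}

	int main(void)
	{
		void *p = fetch(1);

		if (IS_ERR(p))
			printf("fetch failed: %ld\n", PTR_ERR(p));	/* -12 */
		return 0;
	}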
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 91c7f60ffd42..869b524dda6b 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -861,7 +861,7 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	fault_address = vcpu->svm->vmcb->control.exit_info_2;
 	error_code = vcpu->svm->vmcb->control.exit_info_1;
-	if (!vcpu->mmu.page_fault(vcpu, fault_address, error_code)) {
+	if (!kvm_mmu_page_fault(vcpu, fault_address, error_code)) {
 		spin_unlock(&vcpu->kvm->lock);
 		return 1;
 	}
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index aaa98e3e9caf..2a1c37eed711 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1318,7 +1318,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	cr2 = vmcs_readl(EXIT_QUALIFICATION);
 
 	spin_lock(&vcpu->kvm->lock);
-	if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) {
+	if (!kvm_mmu_page_fault(vcpu, cr2, error_code)) {
 		spin_unlock(&vcpu->kvm->lock);
 		return 1;
 	}