author	Avi Kivity <avi@qumranet.com>	2007-01-05 19:36:45 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2007-01-06 02:55:25 -0500
commit	a436036baf331703b4d2c8e8a45f02c597bf6913 (patch)
tree	ba134b0b42ca42c53e818073af8d51d73771c56b /drivers/kvm
parent	9b7a032567ee1128daeebebfc14d3acedfe28c8c (diff)
[PATCH] KVM: MMU: If emulating an instruction fails, try unprotecting the page
A page table may have been recycled into a regular page, and so any
instruction can be executed on it.  Unprotect the page and let the cpu
do its thing.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
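In outline, the recovery path works like this (a minimal sketch, not the
kernel code itself: do_emulate() is a hypothetical stand-in for the x86
emulator call, and the mmio handling of the real emulate_instruction() is
omitted; kvm_mmu_unprotect_page_virt() is the helper this patch adds):

/*
 * Sketch of the recovery path.  On an emulation failure the faulting
 * address may point into a page that used to be a shadowed guest page
 * table but has since been recycled for ordinary data.  Dropping the
 * stale shadow pages ("unprotecting" the gfn) removes the write
 * protection, so the guest can simply re-execute the instruction
 * natively instead of failing.
 */
int emulate_with_unprotect(struct kvm_vcpu *vcpu, gva_t cr2)
{
	int r = do_emulate(vcpu, cr2);	/* hypothetical emulator call */

	if (r) {
		/* Any shadow pages zapped for this gva?  Then retry. */
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		return EMULATE_FAIL;
	}
	return EMULATE_DONE;
}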
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/kvm.h	1
-rw-r--r--	drivers/kvm/kvm_main.c	2
-rw-r--r--	drivers/kvm/mmu.c	58
3 files changed, 61 insertions(+), 0 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b7068ecd7765..34c43bb4d348 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -450,6 +450,7 @@ unsigned long segment_base(u16 selector);
 
 void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
 void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 
 static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 047f6f6ed3f6..79032438dd16 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1063,6 +1063,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	}
 
 	if (r) {
+		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
+			return EMULATE_DONE;
 		if (!vcpu->mmio_needed) {
 			report_emulation_failure(&emulate_ctxt);
 			return EMULATE_FAIL;
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 6dbd83b86623..1484b7211717 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -478,11 +478,62 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return page;
 }
 
+static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+					 struct kvm_mmu_page *page)
+{
+	BUG();
+}
+
 static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
 			     struct kvm_mmu_page *page,
 			     u64 *parent_pte)
 {
 	mmu_page_remove_parent_pte(page, parent_pte);
+	if (page->role.level > PT_PAGE_TABLE_LEVEL)
+		kvm_mmu_page_unlink_children(vcpu, page);
+	hlist_del(&page->hash_link);
+	list_del(&page->link);
+	list_add(&page->link, &vcpu->free_pages);
+}
+
+static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+			     struct kvm_mmu_page *page)
+{
+	u64 *parent_pte;
+
+	while (page->multimapped || page->parent_pte) {
+		if (!page->multimapped)
+			parent_pte = page->parent_pte;
+		else {
+			struct kvm_pte_chain *chain;
+
+			chain = container_of(page->parent_ptes.first,
+					     struct kvm_pte_chain, link);
+			parent_pte = chain->parent_ptes[0];
+		}
+		kvm_mmu_put_page(vcpu, page, parent_pte);
+		*parent_pte = 0;
+	}
+}
+
+static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	unsigned index;
+	struct hlist_head *bucket;
+	struct kvm_mmu_page *page;
+	struct hlist_node *node, *n;
+	int r;
+
+	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+	r = 0;
+	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	bucket = &vcpu->kvm->mmu_page_hash[index];
+	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
+		if (page->gfn == gfn && !page->role.metaphysical) {
+			kvm_mmu_zap_page(vcpu, page);
+			r = 1;
+		}
+	return r;
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
@@ -1001,6 +1052,13 @@ void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 {
 }
 
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+}
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	while (!list_empty(&vcpu->free_pages)) {
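For readers without the rest of mmu.c at hand, the unprotect step is a
hash-bucket sweep: every shadow page caching translations for the gfn is
unlinked and zapped, and the caller learns whether anything was dropped.
A self-contained userspace model of that sweep (all types and helpers
below are simplified stand-ins, not the kernel's hlist machinery):

#include <stdio.h>
#include <stdlib.h>

#define NUM_BUCKETS 16

typedef unsigned long gfn_t;

/* Stand-in for struct kvm_mmu_page: only the fields the sweep reads. */
struct shadow_page {
	gfn_t gfn;
	int metaphysical;		/* shadows no real guest frame; skipped */
	struct shadow_page *next;	/* hash-bucket chain */
};

static struct shadow_page *bucket[NUM_BUCKETS];

static unsigned hashfn(gfn_t gfn) { return gfn % NUM_BUCKETS; }

/* Mirrors kvm_mmu_unprotect_page(): zap every non-metaphysical shadow
 * page for gfn; return 1 if at least one was found. */
static int unprotect_page(gfn_t gfn)
{
	struct shadow_page **pp = &bucket[hashfn(gfn)];
	int r = 0;

	while (*pp) {
		struct shadow_page *page = *pp;
		if (page->gfn == gfn && !page->metaphysical) {
			*pp = page->next;	/* stands in for kvm_mmu_zap_page() */
			free(page);
			r = 1;
		} else {
			pp = &page->next;
		}
	}
	return r;
}

int main(void)
{
	struct shadow_page *p = calloc(1, sizeof(*p));
	p->gfn = 42;
	bucket[hashfn(42)] = p;

	printf("unprotect gfn 42: %d\n", unprotect_page(42));		/* 1 */
	printf("unprotect gfn 42 again: %d\n", unprotect_page(42));	/* 0 */
	return 0;
}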