author     Avi Kivity <avi@qumranet.com>              2007-01-05 19:36:45 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2007-01-06 02:55:25 -0500
commit     a436036baf331703b4d2c8e8a45f02c597bf6913
tree       ba134b0b42ca42c53e818073af8d51d73771c56b
parent     9b7a032567ee1128daeebebfc14d3acedfe28c8c
[PATCH] KVM: MMU: If emulating an instruction fails, try unprotecting the page
A page table may have been recycled into a regular page, and so any
instruction can be executed on it. Unprotect the page and let the cpu do its
thing.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
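
The recovery path this change introduces can be summarized as: when instruction emulation fails on a guest fault, first check whether the faulting address still maps to a write-protected (shadowed) frame; if it does, drop the shadow pages for that frame and return EMULATE_DONE so the guest simply retries the instruction on the cpu. The standalone C sketch below models only that control flow under simplified assumptions; the types and helpers in it (toy_vcpu, emulate_one, unprotect_gfn, handle_fault) are invented for illustration and are not KVM's.

/*
 * Simplified, self-contained model of the recovery flow added by this patch.
 * None of these types or helpers are the real KVM ones; they only mirror the
 * control flow of emulate_instruction() -> kvm_mmu_unprotect_page_virt().
 */
#include <stdbool.h>
#include <stdio.h>

enum emulate_result { EMULATE_DONE, EMULATE_FAIL };

struct toy_vcpu {
        unsigned long shadowed_gfn;     /* guest frame currently write-protected */
        bool has_shadow;                /* is any frame shadowed at all? */
};

/* Stand-in for the x86 emulator: this toy version only fails when the
 * access hits a frame we still shadow. */
static bool emulate_one(struct toy_vcpu *vcpu, unsigned long fault_gfn)
{
        return !(vcpu->has_shadow && vcpu->shadowed_gfn == fault_gfn);
}

/* Stand-in for kvm_mmu_unprotect_page(): drop the shadow, report success. */
static bool unprotect_gfn(struct toy_vcpu *vcpu, unsigned long fault_gfn)
{
        if (vcpu->has_shadow && vcpu->shadowed_gfn == fault_gfn) {
                vcpu->has_shadow = false;       /* page recycled: stop shadowing it */
                return true;                    /* caller can let the guest retry */
        }
        return false;
}

static enum emulate_result handle_fault(struct toy_vcpu *vcpu,
                                        unsigned long fault_gfn)
{
        if (emulate_one(vcpu, fault_gfn))
                return EMULATE_DONE;
        /* Emulation failed: the "page table" may now be an ordinary page. */
        if (unprotect_gfn(vcpu, fault_gfn))
                return EMULATE_DONE;            /* retry on the cpu instead */
        return EMULATE_FAIL;
}

int main(void)
{
        struct toy_vcpu vcpu = { .shadowed_gfn = 0x42, .has_shadow = true };

        printf("fault on shadowed frame: %s\n",
               handle_fault(&vcpu, 0x42) == EMULATE_DONE ? "EMULATE_DONE"
                                                         : "EMULATE_FAIL");
        printf("frame still shadowed afterwards: %s\n",
               vcpu.has_shadow ? "yes" : "no");
        return 0;
}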
-rw-r--r--  drivers/kvm/kvm.h       |  1
-rw-r--r--  drivers/kvm/kvm_main.c  |  2
-rw-r--r--  drivers/kvm/mmu.c       | 58
3 files changed, 61 insertions, 0 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b7068ecd776..34c43bb4d34 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -450,6 +450,7 @@ unsigned long segment_base(u16 selector);
 
 void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
 void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 
 static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 047f6f6ed3f..79032438dd1 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1063,6 +1063,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
         }
 
         if (r) {
+                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
+                        return EMULATE_DONE;
                 if (!vcpu->mmio_needed) {
                         report_emulation_failure(&emulate_ctxt);
                         return EMULATE_FAIL;
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 6dbd83b8662..1484b721171 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -478,11 +478,62 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         return page;
 }
 
+static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+                                         struct kvm_mmu_page *page)
+{
+        BUG();
+}
+
 static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *page,
                              u64 *parent_pte)
 {
         mmu_page_remove_parent_pte(page, parent_pte);
+        if (page->role.level > PT_PAGE_TABLE_LEVEL)
+                kvm_mmu_page_unlink_children(vcpu, page);
+        hlist_del(&page->hash_link);
+        list_del(&page->link);
+        list_add(&page->link, &vcpu->free_pages);
+}
+
+static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+                             struct kvm_mmu_page *page)
+{
+        u64 *parent_pte;
+
+        while (page->multimapped || page->parent_pte) {
+                if (!page->multimapped)
+                        parent_pte = page->parent_pte;
+                else {
+                        struct kvm_pte_chain *chain;
+
+                        chain = container_of(page->parent_ptes.first,
+                                             struct kvm_pte_chain, link);
+                        parent_pte = chain->parent_ptes[0];
+                }
+                kvm_mmu_put_page(vcpu, page, parent_pte);
+                *parent_pte = 0;
+        }
+}
+
+static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+        unsigned index;
+        struct hlist_head *bucket;
+        struct kvm_mmu_page *page;
+        struct hlist_node *node, *n;
+        int r;
+
+        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+        r = 0;
+        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+        bucket = &vcpu->kvm->mmu_page_hash[index];
+        hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
+                if (page->gfn == gfn && !page->role.metaphysical) {
+                        kvm_mmu_zap_page(vcpu, page);
+                        r = 1;
+                }
+        return r;
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
@@ -1001,6 +1052,13 @@ void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 {
 }
 
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
+{
+        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+        return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+}
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
         while (!list_empty(&vcpu->free_pages)) {
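
As a usage note, the new kvm_mmu_unprotect_page() above is essentially a keyed scan of a hash chain: hash the gfn, walk the bucket, and zap every non-metaphysical shadow page mapping that frame. The minimal standalone sketch below illustrates only that lookup-and-teardown pattern; the shadow_page struct, the open-coded singly linked buckets, and the helper names are invented for the example and leave out KVM's parent-PTE bookkeeping.

/*
 * Toy illustration of the lookup pattern used by kvm_mmu_unprotect_page():
 * hash the gfn into a bucket, walk the chain, and tear down every shadow
 * page that maps that gfn.  All types here are invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

#define NUM_BUCKETS 16

struct shadow_page {
        unsigned long gfn;              /* guest frame this shadow page maps */
        struct shadow_page *next;       /* next entry in the same hash bucket */
};

static struct shadow_page *buckets[NUM_BUCKETS];

static unsigned hashfn(unsigned long gfn)
{
        return gfn % NUM_BUCKETS;       /* stand-in for kvm_page_table_hashfn() */
}

/* Track a toy shadow page for gfn. */
static void track(unsigned long gfn)
{
        struct shadow_page *sp = malloc(sizeof(*sp));

        sp->gfn = gfn;
        sp->next = buckets[hashfn(gfn)];
        buckets[hashfn(gfn)] = sp;
}

/* Drop every tracked shadow page for gfn; return 1 if any were found. */
static int unprotect(unsigned long gfn)
{
        struct shadow_page **pp = &buckets[hashfn(gfn)];
        int r = 0;

        while (*pp) {
                struct shadow_page *sp = *pp;

                if (sp->gfn == gfn) {
                        *pp = sp->next;         /* unlink, as kvm_mmu_zap_page() would */
                        free(sp);
                        r = 1;
                } else {
                        pp = &sp->next;
                }
        }
        return r;
}

int main(void)
{
        track(0x42);
        track(0x42);
        track(0x17);
        printf("unprotect 0x42: %d\n", unprotect(0x42));        /* prints 1 */
        printf("unprotect 0x42 again: %d\n", unprotect(0x42));  /* prints 0 */
        return 0;
}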