author      Avi Kivity <avi@qumranet.com>                 2007-01-05 19:36:46 -0500
committer   Linus Torvalds <torvalds@woody.osdl.org>      2007-01-06 02:55:25 -0500
commit      697fe2e24ac49f03a82f6cfe5d77f7a2122ff382
tree        26de8b1535ea7af9124e85985b37069e7d1fd604      /drivers/kvm/mmu.c
parent      a436036baf331703b4d2c8e8a45f02c597bf6913
[PATCH] KVM: MMU: Implement child shadow unlinking
When removing a page table, we must maintain the parent_pte field of all child shadow page tables.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
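The change is easiest to see in miniature: each shadow page records which PTE slot in its parent maps it (parent_pte, or a kvm_pte_chain when it has several parents), and tearing down a table must clear that back-pointer in every child. The sketch below is a simplified, standalone illustration of that invariant only; ENTRIES_PER_TABLE, struct shadow_page, remove_parent_pte(), unlink_children() and main() are illustrative stand-ins, not the kernel's types or functions, and the real code additionally handles multiple parents through kvm_pte_chain as the first hunk shows.

/*
 * Minimal sketch of the invariant this patch enforces: when a shadow page
 * table is torn down, every child shadow page must have its back-pointer to
 * the parent's PTE slot cleared.  Simplified stand-ins only, not mmu.c code.
 */
#include <stddef.h>
#include <assert.h>

#define ENTRIES_PER_TABLE 4              /* stand-in for PT64_ENT_PER_PAGE */

struct shadow_page {
	struct shadow_page *children[ENTRIES_PER_TABLE]; /* stand-in for the PTEs */
	struct shadow_page **parent_pte; /* slot in the parent that maps us */
	int is_leaf;                     /* stand-in for role.level checks */
};

/* Clear one child's back-pointer -- simplified analogue of
 * mmu_page_remove_parent_pte(). */
static void remove_parent_pte(struct shadow_page *child,
			      struct shadow_page **parent_pte)
{
	assert(child->parent_pte == parent_pte); /* mirrors the BUG() checks */
	child->parent_pte = NULL;
}

/* Tear down one level of the shadow tree -- simplified analogue of
 * kvm_mmu_page_unlink_children(): clear each present entry and tell the
 * child that this parent slot no longer points at it. */
static void unlink_children(struct shadow_page *page)
{
	unsigned i;

	if (page->is_leaf)
		return;                  /* leaf tables hold guest frames, not pages */

	for (i = 0; i < ENTRIES_PER_TABLE; ++i) {
		struct shadow_page *child = page->children[i];

		page->children[i] = NULL;
		if (child)
			remove_parent_pte(child, &page->children[i]);
	}
}

int main(void)
{
	struct shadow_page leaf = { .is_leaf = 1 };
	struct shadow_page root = { .is_leaf = 0 };

	/* map the leaf through slot 0 of the root, recording the back-pointer */
	root.children[0] = &leaf;
	leaf.parent_pte = &root.children[0];

	unlink_children(&root);
	assert(leaf.parent_pte == NULL); /* the back-pointer was maintained */
	return 0;
}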
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--   drivers/kvm/mmu.c   42
1 file changed, 38 insertions(+), 4 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1484b721171..7e20dbf4f84 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -402,12 +402,21 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 				break;
 			if (pte_chain->parent_ptes[i] != parent_pte)
 				continue;
-			while (i + 1 < NR_PTE_CHAIN_ENTRIES) {
+			while (i + 1 < NR_PTE_CHAIN_ENTRIES
+				&& pte_chain->parent_ptes[i + 1]) {
 				pte_chain->parent_ptes[i]
 					= pte_chain->parent_ptes[i + 1];
 				++i;
 			}
 			pte_chain->parent_ptes[i] = NULL;
+			if (i == 0) {
+				hlist_del(&pte_chain->link);
+				kfree(pte_chain);
+				if (hlist_empty(&page->parent_ptes)) {
+					page->multimapped = 0;
+					page->parent_pte = NULL;
+				}
+			}
 			return;
 		}
 	BUG();
@@ -481,7 +490,30 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 					 struct kvm_mmu_page *page)
 {
-	BUG();
+	unsigned i;
+	u64 *pt;
+	u64 ent;
+
+	pt = __va(page->page_hpa);
+
+	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
+		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+			if (pt[i] & PT_PRESENT_MASK)
+				rmap_remove(vcpu->kvm, &pt[i]);
+			pt[i] = 0;
+		}
+		return;
+	}
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+		ent = pt[i];
+
+		pt[i] = 0;
+		if (!(ent & PT_PRESENT_MASK))
+			continue;
+		ent &= PT64_BASE_ADDR_MASK;
+		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
+	}
 }
 
 static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
@@ -489,8 +521,7 @@ static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
 			     u64 *parent_pte)
 {
 	mmu_page_remove_parent_pte(page, parent_pte);
-	if (page->role.level > PT_PAGE_TABLE_LEVEL)
-		kvm_mmu_page_unlink_children(vcpu, page);
+	kvm_mmu_page_unlink_children(vcpu, page);
 	hlist_del(&page->hash_link);
 	list_del(&page->link);
 	list_add(&page->link, &vcpu->free_pages);
@@ -511,6 +542,7 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
 					     struct kvm_pte_chain, link);
 			parent_pte = chain->parent_ptes[0];
 		}
+		BUG_ON(!parent_pte);
 		kvm_mmu_put_page(vcpu, page, parent_pte);
 		*parent_pte = 0;
 	}
@@ -530,6 +562,8 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 	bucket = &vcpu->kvm->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
+			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+				 page->role.word);
 			kvm_mmu_zap_page(vcpu, page);
 			r = 1;
 		}