author    Avi Kivity <avi@qumranet.com>    2007-01-05 19:36:40 -0500
committer Linus Torvalds <torvalds@woody.osdl.org>    2007-01-06 02:55:24 -0500
commit  17ac10ad2bb7d8c4f401668484b2e661a15726c6
tree    671a11b2be1d9f48e96eae8af2b05867f4199db0 /drivers/kvm/mmu.c
parent  ac79c978f173586ab3624427c89cd22b393cabd4
[PATCH] KVM: MMU: Special treatment for shadow pae root pages
Since we're not going to cache the pae-mode shadow root pages, allocate a
single pae shadow that will hold the four lower-level pages, which will
act as roots.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--  drivers/kvm/mmu.c | 110
1 file changed, 80 insertions(+), 30 deletions(-)
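As a reading aid before the diff itself, the scheme the commit message describes can be sketched in a few lines of ordinary C. This is a minimal stand-alone illustration, not code from the patch: the identifiers (pdpte, pae_root_install, pae_root_page_addr) and the exact mask values are hypothetical stand-ins; only the shape mirrors the real mmu_alloc_roots()/mmu_free_roots() below, a single four-entry root table whose entries each carry a lower-level page's physical address plus a present bit.

/*
 * Illustrative sketch only. Models the PAE shadow root as four 64-bit
 * entries, each holding a lower-level table's physical address plus a
 * present bit, the way mmu_alloc_roots() composes pae_root[i] below.
 */
#include <stdint.h>
#include <stdio.h>

#define ENTRY_PRESENT  1ULL                    /* stand-in for PT_PRESENT_MASK */
#define BASE_ADDR_MASK 0x000ffffffffff000ULL   /* stand-in for PT64_BASE_ADDR_MASK */

typedef uint64_t pdpte;

/* Install four lower-level page addresses as present root entries. */
static void pae_root_install(pdpte root[4], const uint64_t page_pa[4])
{
	for (int i = 0; i < 4; ++i)
		root[i] = (page_pa[i] & BASE_ADDR_MASK) | ENTRY_PRESENT;
}

/* Strip the flag bits to recover the page address, as mmu_free_roots()
 * does with PT64_BASE_ADDR_MASK before releasing each page. */
static uint64_t pae_root_page_addr(pdpte entry)
{
	return entry & BASE_ADDR_MASK;
}

int main(void)
{
	pdpte root[4];
	const uint64_t pages[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

	pae_root_install(root, pages);
	for (int i = 0; i < 4; ++i)
		printf("entry %d: %#llx -> page %#llx\n", i,
		       (unsigned long long)root[i],
		       (unsigned long long)pae_root_page_addr(root[i]));
	return 0;
}

In the patch, the physical address of that single root page is what becomes the shadow cr3: mmu_alloc_roots() ends with vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root).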
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 0f27beb6c5d..1dcbbd51166 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -420,19 +420,63 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 	}
 }
 
+static void mmu_free_roots(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+#ifdef CONFIG_X86_64
+	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->mmu.root_hpa;
+
+		ASSERT(VALID_PAGE(root));
+		release_pt_page_64(vcpu, root, PT64_ROOT_LEVEL);
+		vcpu->mmu.root_hpa = INVALID_PAGE;
+		return;
+	}
+#endif
+	for (i = 0; i < 4; ++i) {
+		hpa_t root = vcpu->mmu.pae_root[i];
+
+		ASSERT(VALID_PAGE(root));
+		root &= PT64_BASE_ADDR_MASK;
+		release_pt_page_64(vcpu, root, PT32E_ROOT_LEVEL - 1);
+		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+	}
+	vcpu->mmu.root_hpa = INVALID_PAGE;
+}
+
+static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+#ifdef CONFIG_X86_64
+	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->mmu.root_hpa;
+
+		ASSERT(!VALID_PAGE(root));
+		root = kvm_mmu_alloc_page(vcpu, NULL);
+		vcpu->mmu.root_hpa = root;
+		return;
+	}
+#endif
+	for (i = 0; i < 4; ++i) {
+		hpa_t root = vcpu->mmu.pae_root[i];
+
+		ASSERT(!VALID_PAGE(root));
+		root = kvm_mmu_alloc_page(vcpu, NULL);
+		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+	}
+	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+}
+
 static void nonpaging_flush(struct kvm_vcpu *vcpu)
 {
 	hpa_t root = vcpu->mmu.root_hpa;
 
 	++kvm_stat.tlb_flush;
 	pgprintk("nonpaging_flush\n");
-	ASSERT(VALID_PAGE(root));
-	release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
-	root = kvm_mmu_alloc_page(vcpu, NULL);
-	ASSERT(VALID_PAGE(root));
-	vcpu->mmu.root_hpa = root;
-	if (is_paging(vcpu))
-		root |= (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK));
+	mmu_free_roots(vcpu);
+	mmu_alloc_roots(vcpu);
 	kvm_arch_ops->set_cr3(vcpu, root);
 	kvm_arch_ops->tlb_flush(vcpu);
 }
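One subtlety in the hunk above is worth a note: nonpaging_flush() still reads root before freeing and reallocating, and on the PAE path that stays consistent because root_hpa is the physical address of the pae_root page itself, which is allocated once in alloc_mmu_pages() and only repopulated here. A hypothetical wrapper (not in the patch) showing the intended back-to-back pairing:

/* Hypothetical helper, not part of the patch: mmu_free_roots() and
 * mmu_alloc_roots() are designed to be called as a pair, exactly as
 * nonpaging_flush() now does. */
static void reset_shadow_roots(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);	/* release the four lower-level pages  */
	mmu_alloc_roots(vcpu);	/* repopulate the same pae_root page   */
}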
@@ -475,13 +519,7 @@ static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
 
 static void nonpaging_free(struct kvm_vcpu *vcpu)
 {
-	hpa_t root;
-
-	ASSERT(vcpu);
-	root = vcpu->mmu.root_hpa;
-	if (VALID_PAGE(root))
-		release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
-	vcpu->mmu.root_hpa = INVALID_PAGE;
+	mmu_free_roots(vcpu);
 }
 
 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
@@ -495,7 +533,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 	context->free = nonpaging_free;
 	context->root_level = PT32E_ROOT_LEVEL;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
-	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
+	mmu_alloc_roots(vcpu);
 	ASSERT(VALID_PAGE(context->root_hpa));
 	kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
 	return 0;
@@ -647,7 +685,7 @@ static void paging_free(struct kvm_vcpu *vcpu)
 #include "paging_tmpl.h"
 #undef PTTYPE
 
-static int paging64_init_context(struct kvm_vcpu *vcpu)
+static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
 	struct kvm_mmu *context = &vcpu->mmu;
 
@@ -657,15 +695,20 @@ static int paging64_init_context(struct kvm_vcpu *vcpu)
 	context->inval_page = paging_inval_page;
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->free = paging_free;
-	context->root_level = PT64_ROOT_LEVEL;
-	context->shadow_root_level = PT64_ROOT_LEVEL;
-	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
+	context->root_level = level;
+	context->shadow_root_level = level;
+	mmu_alloc_roots(vcpu);
 	ASSERT(VALID_PAGE(context->root_hpa));
 	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
 		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
 	return 0;
 }
 
+static int paging64_init_context(struct kvm_vcpu *vcpu)
+{
+	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
+}
+
 static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = &vcpu->mmu;
@@ -677,7 +720,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 	context->free = paging_free;
 	context->root_level = PT32_ROOT_LEVEL;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
-	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
+	mmu_alloc_roots(vcpu);
 	ASSERT(VALID_PAGE(context->root_hpa));
 	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
 		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
@@ -686,14 +729,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32E_init_context(struct kvm_vcpu *vcpu)
 {
-	int ret;
-
-	if ((ret = paging64_init_context(vcpu)))
-		return ret;
-
-	vcpu->mmu.root_level = PT32E_ROOT_LEVEL;
-	vcpu->mmu.shadow_root_level = PT32E_ROOT_LEVEL;
-	return 0;
+	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
 }
 
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
@@ -737,26 +773,40 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 		__free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
 		page->page_hpa = INVALID_PAGE;
 	}
+	free_page((unsigned long)vcpu->mmu.pae_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 {
+	struct page *page;
 	int i;
 
 	ASSERT(vcpu);
 
 	for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
-		struct page *page;
 		struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
 
 		INIT_LIST_HEAD(&page_header->link);
-		if ((page = alloc_page(GFP_KVM_MMU)) == NULL)
+		if ((page = alloc_page(GFP_KERNEL)) == NULL)
 			goto error_1;
 		page->private = (unsigned long)page_header;
 		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
 		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
 		list_add(&page_header->link, &vcpu->free_pages);
 	}
+
+	/*
+	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
+	 * Therefore we need to allocate shadow page tables in the first
+	 * 4GB of memory, which happens to fit the DMA32 zone.
+	 */
+	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+	if (!page)
+		goto error_1;
+	vcpu->mmu.pae_root = page_address(page);
+	for (i = 0; i < 4; ++i)
+		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+
 	return 0;
 
 error_1:
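The comment introduced in that last hunk carries the key constraint: a 32-bit guest's cr3 can only hold a 32-bit physical address, so the pae_root page must come from the first 4GB, which __GFP_DMA32 guarantees. A hypothetical sanity check (not in the patch) would make the invariant explicit right after the allocation:

	/* Hypothetical assertion, not part of the patch: the PAE root must
	 * sit below 4GB so that a 32-bit cr3 can address it; this is what
	 * the __GFP_DMA32 allocation above ensures. */
	BUG_ON(__pa(vcpu->mmu.pae_root) >= (1ULL << 32));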