author		Zhang Xiantao <xiantao.zhang@intel.com>		2007-12-13 21:01:48 -0500
committer	Avi Kivity <avi@qumranet.com>			2008-01-30 10:58:10 -0500
commit		f05e70ac03a6614af12194a014b338ec5594cb5c (patch)
tree		1dcaea0f519167ad75c99a2c4e7c46e08050a8fe /drivers/kvm/mmu.c
parent		d69fb81f0554fb980e4b1d3db4e44351c2c4a4a2 (diff)
KVM: Portability: Move mmu-related fields to kvm_arch
This patch moves the mmu-related fields to kvm_arch.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--	drivers/kvm/mmu.c	58
1 file changed, 30 insertions(+), 28 deletions(-)
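Before the hunks, a rough sketch of the fields this patch gathers under struct kvm_arch may help orientation. It is reconstructed purely from the kvm->arch.* accesses visible in the diff below; the field types and any other members are assumptions, not part of this patch:

/*
 * Illustrative sketch only -- member names taken from the accesses in
 * this diff; types and any additional members are assumed, not quoted
 * from the real arch header.
 */
struct kvm_arch {
	unsigned int n_free_mmu_pages;       /* shadow pages still available */
	unsigned int n_requested_mmu_pages;  /* userspace-requested shadow page limit */
	unsigned int n_alloc_mmu_pages;      /* shadow pages currently allocated */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; /* gfn -> shadow page buckets */
	struct list_head active_mmu_pages;   /* in-use shadow pages, roughly LRU ordered */
};

Call sites then reach these counters and lists through the embedded per-arch struct, e.g. vcpu->kvm->arch.n_free_mmu_pages instead of vcpu->kvm->n_free_mmu_pages, which is exactly the mechanical substitution the hunks below perform.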
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1dc0e8c02c70..c26d83f86a3a 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -553,7 +553,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	__free_page(virt_to_page(sp->spt));
 	__free_page(virt_to_page(sp->gfns));
 	kfree(sp);
-	++kvm->n_free_mmu_pages;
+	++kvm->arch.n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -566,19 +566,19 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu_page *sp;
 
-	if (!vcpu->kvm->n_free_mmu_pages)
+	if (!vcpu->kvm->arch.n_free_mmu_pages)
 		return NULL;
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-	list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
+	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	ASSERT(is_empty_shadow_page(sp->spt));
 	sp->slot_bitmap = 0;
 	sp->multimapped = 0;
 	sp->parent_pte = parent_pte;
-	--vcpu->kvm->n_free_mmu_pages;
+	--vcpu->kvm->arch.n_free_mmu_pages;
 	return sp;
 }
 
@@ -666,7 +666,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &kvm->mmu_page_hash[index];
+	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
@@ -705,7 +705,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
 		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && sp->role.word == role.word) {
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
@@ -796,7 +796,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else
-		list_move(&sp->link, &kvm->active_mmu_pages);
+		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -812,26 +812,26 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	 * change the value
 	 */
 
-	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
 	    kvm_nr_mmu_pages) {
-		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
-				       - kvm->n_free_mmu_pages;
+		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
+				       - kvm->arch.n_free_mmu_pages;
 
 		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
 			struct kvm_mmu_page *page;
 
-			page = container_of(kvm->active_mmu_pages.prev,
+			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			kvm_mmu_zap_page(kvm, page);
 			n_used_mmu_pages--;
 		}
-		kvm->n_free_mmu_pages = 0;
+		kvm->arch.n_free_mmu_pages = 0;
 	}
 	else
-		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
-					 - kvm->n_alloc_mmu_pages;
+		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
+					 - kvm->arch.n_alloc_mmu_pages;
 
-	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -845,7 +845,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &kvm->mmu_page_hash[index];
+	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
@@ -1362,7 +1362,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		vcpu->arch.last_pte_updated = NULL;
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
 		if (sp->gfn != gfn || sp->role.metaphysical)
 			continue;
@@ -1428,10 +1428,10 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *sp;
 
-		sp = container_of(vcpu->kvm->active_mmu_pages.prev,
+		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
@@ -1482,8 +1482,8 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *sp;
 
-	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		sp = container_of(vcpu->kvm->active_mmu_pages.next,
+	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
+		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 	}
@@ -1497,10 +1497,12 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
 	ASSERT(vcpu);
 
-	if (vcpu->kvm->n_requested_mmu_pages)
-		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+	if (vcpu->kvm->arch.n_requested_mmu_pages)
+		vcpu->kvm->arch.n_free_mmu_pages =
+					vcpu->kvm->arch.n_requested_mmu_pages;
 	else
-		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
+		vcpu->kvm->arch.n_free_mmu_pages =
+					vcpu->kvm->arch.n_alloc_mmu_pages;
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
 	 * Therefore we need to allocate shadow page tables in the first
@@ -1549,7 +1551,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *sp;
 
-	list_for_each_entry(sp, &kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
 		int i;
 		u64 *pt;
 
@@ -1568,7 +1570,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 
-	list_for_each_entry_safe(sp, node, &kvm->active_mmu_pages, link)
+	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
 		kvm_mmu_zap_page(kvm, sp);
 
 	kvm_flush_remote_tlbs(kvm);
@@ -1738,7 +1740,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
 	struct kvm_mmu_page *sp;
 	int i;
 
-	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
 		u64 *pt = sp->spt;
 
 		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
@@ -1774,7 +1776,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 	unsigned long *rmapp;
 	gfn_t gfn;
 
-	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
 		if (sp->role.metaphysical)
 			continue;
 