author     Zhang Xiantao <xiantao.zhang@intel.com>    2007-12-13 21:01:48 -0500
committer  Avi Kivity <avi@qumranet.com>              2008-01-30 10:58:10 -0500
commit     f05e70ac03a6614af12194a014b338ec5594cb5c (patch)
tree       1dcaea0f519167ad75c99a2c4e7c46e08050a8fe /drivers/kvm
parent     d69fb81f0554fb980e4b1d3db4e44351c2c4a4a2 (diff)
KVM: Portability: Move mmu-related fields to kvm_arch
This patch moves the mmu-related fields to kvm_arch.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
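For readers of the diff below, the practical effect is that MMU bookkeeping is no longer reached directly through struct kvm but through the architecture-specific kvm->arch container. A minimal, self-contained sketch of that access-path change (hypothetical, simplified stand-ins for the kernel structures; the real definitions are in drivers/kvm/x86.h and drivers/kvm/kvm.h):

/*
 * Illustrative sketch only -- simplified stand-ins, not the kernel code.
 */
struct example_list_head { struct example_list_head *next, *prev; };

struct example_kvm_arch {
	unsigned int n_free_mmu_pages;             /* MMU bookkeeping now lives here */
	struct example_list_head active_mmu_pages;
};

struct example_kvm {
	struct example_kvm_arch arch;              /* x86-specific state nested in struct kvm */
};

static inline int example_mmu_pages_available(struct example_kvm *kvm)
{
	/* before the patch: kvm->n_free_mmu_pages
	 * after the patch:  kvm->arch.n_free_mmu_pages */
	return kvm->arch.n_free_mmu_pages != 0;
}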
Diffstat (limited to 'drivers/kvm')
 drivers/kvm/kvm.h |  8
 drivers/kvm/mmu.c | 58
 drivers/kvm/mmu.h |  2
 drivers/kvm/x86.c |  8
 drivers/kvm/x86.h |  9
 5 files changed, 44 insertions, 41 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index bf5b85c1f094..65de5e4225f7 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -119,14 +119,6 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
-	/*
-	 * Hash table of struct kvm_mmu_page.
-	 */
-	struct list_head active_mmu_pages;
-	unsigned int n_free_mmu_pages;
-	unsigned int n_requested_mmu_pages;
-	unsigned int n_alloc_mmu_pages;
-	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	struct list_head vm_list;
 	struct file *filp;
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1dc0e8c02c70..c26d83f86a3a 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -553,7 +553,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	__free_page(virt_to_page(sp->spt));
 	__free_page(virt_to_page(sp->gfns));
 	kfree(sp);
-	++kvm->n_free_mmu_pages;
+	++kvm->arch.n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -566,19 +566,19 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu_page *sp;
 
-	if (!vcpu->kvm->n_free_mmu_pages)
+	if (!vcpu->kvm->arch.n_free_mmu_pages)
 		return NULL;
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-	list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
+	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	ASSERT(is_empty_shadow_page(sp->spt));
 	sp->slot_bitmap = 0;
 	sp->multimapped = 0;
 	sp->parent_pte = parent_pte;
-	--vcpu->kvm->n_free_mmu_pages;
+	--vcpu->kvm->arch.n_free_mmu_pages;
 	return sp;
 }
 
@@ -666,7 +666,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &kvm->mmu_page_hash[index];
+	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
@@ -705,7 +705,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
 		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && sp->role.word == role.word) {
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
@@ -796,7 +796,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else
-		list_move(&sp->link, &kvm->active_mmu_pages);
+		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -812,26 +812,26 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	 * change the value
 	 */
 
-	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
 	    kvm_nr_mmu_pages) {
-		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
-				       - kvm->n_free_mmu_pages;
+		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
+				       - kvm->arch.n_free_mmu_pages;
 
 		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
 			struct kvm_mmu_page *page;
 
-			page = container_of(kvm->active_mmu_pages.prev,
+			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			kvm_mmu_zap_page(kvm, page);
 			n_used_mmu_pages--;
 		}
-		kvm->n_free_mmu_pages = 0;
+		kvm->arch.n_free_mmu_pages = 0;
 	}
 	else
-		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
-					 - kvm->n_alloc_mmu_pages;
+		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
+					 - kvm->arch.n_alloc_mmu_pages;
 
-	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -845,7 +845,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &kvm->mmu_page_hash[index];
+	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
@@ -1362,7 +1362,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		vcpu->arch.last_pte_updated = NULL;
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
 		if (sp->gfn != gfn || sp->role.metaphysical)
 			continue;
@@ -1428,10 +1428,10 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *sp;
 
-		sp = container_of(vcpu->kvm->active_mmu_pages.prev,
+		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
@@ -1482,8 +1482,8 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *sp;
 
-	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		sp = container_of(vcpu->kvm->active_mmu_pages.next,
+	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
+		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 	}
@@ -1497,10 +1497,12 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
 	ASSERT(vcpu);
 
-	if (vcpu->kvm->n_requested_mmu_pages)
-		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+	if (vcpu->kvm->arch.n_requested_mmu_pages)
+		vcpu->kvm->arch.n_free_mmu_pages =
+				vcpu->kvm->arch.n_requested_mmu_pages;
 	else
-		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
+		vcpu->kvm->arch.n_free_mmu_pages =
+				vcpu->kvm->arch.n_alloc_mmu_pages;
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
 	 * Therefore we need to allocate shadow page tables in the first
@@ -1549,7 +1551,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *sp;
 
-	list_for_each_entry(sp, &kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
 		int i;
 		u64 *pt;
 
@@ -1568,7 +1570,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 
-	list_for_each_entry_safe(sp, node, &kvm->active_mmu_pages, link)
+	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
 		kvm_mmu_zap_page(kvm, sp);
 
 	kvm_flush_remote_tlbs(kvm);
@@ -1738,7 +1740,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
 	struct kvm_mmu_page *sp;
 	int i;
 
-	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
 		u64 *pt = sp->spt;
 
 		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
@@ -1774,7 +1776,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 	unsigned long *rmapp;
 	gfn_t gfn;
 
-	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
 		if (sp->role.metaphysical)
 			continue;
 
diff --git a/drivers/kvm/mmu.h b/drivers/kvm/mmu.h
index 9ebfd1cafe62..cbfc272262df 100644
--- a/drivers/kvm/mmu.h
+++ b/drivers/kvm/mmu.h
@@ -5,7 +5,7 @@
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
 		__kvm_mmu_free_some_pages(vcpu);
 }
 
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 7e1bd526bd5c..c0e95fb9f46c 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1175,7 +1175,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 	mutex_lock(&kvm->lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
-	kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
 	mutex_unlock(&kvm->lock);
 	return 0;
@@ -1183,7 +1183,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 
 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
-	return kvm->n_alloc_mmu_pages;
+	return kvm->arch.n_alloc_mmu_pages;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -3051,7 +3051,7 @@ struct kvm *kvm_arch_create_vm(void)
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&kvm->active_mmu_pages);
+	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 
 	return kvm;
 }
@@ -3130,7 +3130,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		}
 	}
 
-	if (!kvm->n_requested_mmu_pages) {
+	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	}
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index be84f2b89095..5cdc3666e212 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -266,6 +266,15 @@ struct kvm_mem_alias {
 struct kvm_arch{
 	int naliases;
 	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+
+	unsigned int n_free_mmu_pages;
+	unsigned int n_requested_mmu_pages;
+	unsigned int n_alloc_mmu_pages;
+	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+	/*
+	 * Hash table of struct kvm_mmu_page.
+	 */
+	struct list_head active_mmu_pages;
 };
 
 struct kvm_vcpu_stat {