author     Avi Kivity <avi@qumranet.com>   2007-03-20 08:34:28 -0400
committer  Avi Kivity <avi@qumranet.com>   2007-05-03 03:52:25 -0400
commit     aac012245a59d78372dc66d292ba567367d86b60 (patch)
tree       4faf7564b746b4efd6be91ca83b475cfd9d446e7 /drivers/kvm
parent     ca5aac1f96c18b5e4dcfea253d7ab607b5dcd5c9 (diff)
KVM: MMU: Remove global pte tracking
The initial, noncaching version of the kvm mmu flushed all nonglobal shadow page table translations (much like a native tlb flush). The new implementation flushes translations only when they change, rendering global pte tracking superfluous.

This removes the unused tracking mechanism and storage space.

Signed-off-by: Avi Kivity <avi@qumranet.com>
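For context, a minimal userspace-only sketch of the mechanism being removed, reconstructed from the diff below; the struct, field, and helper names here are simplified stand-ins for the real kvm code, not the actual kernel API:

    /*
     * Sketch: each shadow page carried a "global" flag, set at allocation
     * and cleared whenever a non-global guest pte was shadowed into it.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define PT_GLOBAL_MASK (1ULL << 8)   /* G bit of an x86 pte */

    struct kvm_mmu_page_sketch {
            int global;                  /* set if all ptes in this page are global */
    };

    /* Stand-in for page_header(__pa(shadow_pte)): map a pte to its shadow page. */
    static struct kvm_mmu_page_sketch *page_of(struct kvm_mmu_page_sketch *page,
                                               uint64_t *shadow_pte)
    {
            (void)shadow_pte;
            return page;
    }

    static void set_pte_sketch(struct kvm_mmu_page_sketch *page,
                               uint64_t *shadow_pte, uint64_t guest_pte)
    {
            *shadow_pte = guest_pte;
            /* The tracking this patch deletes: any non-global guest pte
             * demoted the whole shadow page from "global". */
            if (!(guest_pte & PT_GLOBAL_MASK))
                    page_of(page, shadow_pte)->global = 0;
    }

    int main(void)
    {
            struct kvm_mmu_page_sketch page = { .global = 1 };  /* as set at allocation */
            uint64_t spte = 0;

            set_pte_sketch(&page, &spte, 0x1000);  /* non-global pte clears the flag */
            printf("page.global = %d\n", page.global);
            return 0;
    }

Since translations are now flushed individually when they change, nothing ever consulted this flag, which is why the patch can drop the field, its initialization, and the helper that cleared it.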
Diffstat (limited to 'drivers/kvm')
-rw-r--r--  drivers/kvm/kvm.h  1
-rw-r--r--  drivers/kvm/mmu.c  9
2 files changed, 0 insertions(+), 10 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 7866b34b6c96..a4331da816d0 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -136,7 +136,6 @@ struct kvm_mmu_page {
 	unsigned long slot_bitmap; /* One bit set per slot which has memory
 				    * in this shadow page.
 				    */
-	int global;              /* Set if all ptes in this page are global */
 	int multimapped;         /* More than one parent_pte? */
 	int root_count;          /* Currently serving as active root */
 	union {
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 4843e95e54e1..2930d7cc7c06 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -461,7 +461,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
 	ASSERT(is_empty_shadow_page(page->page_hpa));
 	page->slot_bitmap = 0;
-	page->global = 1;
 	page->multimapped = 0;
 	page->parent_pte = parent_pte;
 	--vcpu->kvm->n_free_mmu_pages;
@@ -927,11 +926,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
 	kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
 }
 
-static void mark_pagetable_nonglobal(void *shadow_pte)
-{
-	page_header(__pa(shadow_pte))->global = 0;
-}
-
 static inline void set_pte_common(struct kvm_vcpu *vcpu,
 				   u64 *shadow_pte,
 				   gpa_t gaddr,
@@ -949,9 +943,6 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
 
 	*shadow_pte |= access_bits;
 
-	if (!(*shadow_pte & PT_GLOBAL_MASK))
-		mark_pagetable_nonglobal(shadow_pte);
-
 	if (is_error_hpa(paddr)) {
 		*shadow_pte |= gaddr;
 		*shadow_pte |= PT_SHADOW_IO_MARK;