author     Marcelo Tosatti <mtosatti@redhat.com>  2009-04-05 13:54:47 -0400
committer  Avi Kivity <avi@redhat.com>            2009-06-10 04:48:39 -0400
commit     c2d0ee46e6e633a3c23ecbcb9b03ad731906cd79
tree       0d50f23f7a4c2de710085cd66b5f3c25f81d12e3 /arch/x86
parent     2f8b9ee14eb439008e0c5131116ea6baa40dba50
KVM: MMU: remove global page optimization logic
The complexity needed to fix it is not worth the gains, as discussed in
http://article.gmane.org/gmane.comp.emulators.kvm.devel/28649.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
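Editorial note, for orientation only: the net interface change visible in the diff below is that per-page "global" tracking disappears. struct kvm_mmu_page loses its global flag, the oos_global_pages list and the mmu_unsync_global counter go away, kvm_mmu_sync_global() is removed, and set_spte()/mmu_set_spte() drop their global parameter. A minimal sketch of the post-patch prototypes, copied from the hunks below and not part of the commit itself:

    /* after this patch: no "global" argument in either spte helper */
    static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                        unsigned pte_access, int user_fault,
                        int write_fault, int dirty, int largepage,
                        gfn_t gfn, pfn_t pfn, bool speculative,
                        bool can_unsync);

    static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                             unsigned pt_access, unsigned pte_access,
                             int user_fault, int write_fault, int dirty,
                             int *ptwrite, int largepage, gfn_t gfn,
                             pfn_t pfn, bool speculative);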
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h  |  4
-rw-r--r--  arch/x86/kvm/mmu.c               | 50
-rw-r--r--  arch/x86/kvm/paging_tmpl.h       |  6
-rw-r--r--  arch/x86/kvm/x86.c               |  4
4 files changed, 8 insertions, 56 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3fc46238476c..0e3a7c6e522c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -213,7 +213,6 @@ struct kvm_mmu_page {
 	int multimapped; /* More than one parent_pte? */
 	int root_count;  /* Currently serving as active root */
 	bool unsync;
-	bool global;
 	unsigned int unsync_children;
 	union {
 		u64 *parent_pte; /* !multimapped */
@@ -395,7 +394,6 @@ struct kvm_arch{
 	 */
 	struct list_head active_mmu_pages;
 	struct list_head assigned_dev_head;
-	struct list_head oos_global_pages;
 	struct iommu_domain *iommu_domain;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
@@ -425,7 +423,6 @@ struct kvm_vm_stat {
 	u32 mmu_recycled;
 	u32 mmu_cache_miss;
 	u32 mmu_unsync;
-	u32 mmu_unsync_global;
 	u32 remote_tlb_flush;
 	u32 lpages;
 };
@@ -640,7 +637,6 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b582adde6830..b39ec626040e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1075,18 +1075,10 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	return NULL;
 }
 
-static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	list_del(&sp->oos_link);
-	--kvm->stat.mmu_unsync_global;
-}
-
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	WARN_ON(!sp->unsync);
 	sp->unsync = 0;
-	if (sp->global)
-		kvm_unlink_unsync_global(kvm, sp);
 	--kvm->stat.mmu_unsync;
 }
 
@@ -1249,7 +1241,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
-	sp->global = 0;
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
@@ -1647,11 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	++vcpu->kvm->stat.mmu_unsync;
 	sp->unsync = 1;
 
-	if (sp->global) {
-		list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
-		++vcpu->kvm->stat.mmu_unsync_global;
-	} else
-		kvm_mmu_mark_parents_unsync(vcpu, sp);
+	kvm_mmu_mark_parents_unsync(vcpu, sp);
 
 	mmu_convert_notrap(sp);
 	return 0;
@@ -1678,21 +1665,12 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
-		    int global, gfn_t gfn, pfn_t pfn, bool speculative,
+		    gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync)
 {
 	u64 spte;
 	int ret = 0;
 	u64 mt_mask = shadow_mt_mask;
-	struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
-
-	if (!global && sp->global) {
-		sp->global = 0;
-		if (sp->unsync) {
-			kvm_unlink_unsync_global(vcpu->kvm, sp);
-			kvm_mmu_mark_parents_unsync(vcpu, sp);
-		}
-	}
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
@@ -1766,8 +1744,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
-			 int *ptwrite, int largepage, int global,
-			 gfn_t gfn, pfn_t pfn, bool speculative)
+			 int *ptwrite, int largepage, gfn_t gfn,
+			 pfn_t pfn, bool speculative)
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1796,7 +1774,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		was_rmapped = 1;
 	}
 	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-		      dirty, largepage, global, gfn, pfn, speculative, true)) {
+		      dirty, largepage, gfn, pfn, speculative, true)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
@@ -1844,7 +1822,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
 				     0, write, 1, &pt_write,
-				     largepage, 0, gfn, pfn, false);
+				     largepage, gfn, pfn, false);
 			++vcpu->stat.pf_fixed;
 			break;
 		}
@@ -2015,15 +1993,6 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = vcpu->kvm;
-	struct kvm_mmu_page *sp, *n;
-
-	list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
-		kvm_sync_page(vcpu, sp);
-}
-
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -2031,13 +2000,6 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-	spin_lock(&vcpu->kvm->mmu_lock);
-	mmu_sync_global(vcpu);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	return vaddr;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 09782a982785..258e4591e1ca 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -268,8 +268,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	kvm_get_pfn(pfn);
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
 		     gpte & PT_DIRTY_MASK, NULL, largepage,
-		     gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
-		     pfn, true);
+		     gpte_to_gfn(gpte), pfn, true);
 }
 
 /*
@@ -303,7 +302,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 				     user_fault, write_fault,
 				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
 				     ptwrite, largepage,
-				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
 				     gw->gfn, pfn, false);
 			break;
 		}
@@ -592,7 +590,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
-			 is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
+			 is_dirty_pte(gpte), 0, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false);
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bb04f11bf70f..b5ac1b722454 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -108,7 +108,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
108 { "mmu_recycled", VM_STAT(mmu_recycled) }, 108 { "mmu_recycled", VM_STAT(mmu_recycled) },
109 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, 109 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
110 { "mmu_unsync", VM_STAT(mmu_unsync) }, 110 { "mmu_unsync", VM_STAT(mmu_unsync) },
111 { "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
112 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, 111 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
113 { "largepages", VM_STAT(lpages) }, 112 { "largepages", VM_STAT(lpages) },
114 { NULL } 113 { NULL }
@@ -322,7 +321,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 	vcpu->arch.cr0 = cr0;
 
-	kvm_mmu_sync_global(vcpu);
 	kvm_mmu_reset_context(vcpu);
 	return;
 }
@@ -371,7 +369,6 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	kvm_x86_ops->set_cr4(vcpu, cr4);
 	vcpu->arch.cr4 = cr4;
 	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
-	kvm_mmu_sync_global(vcpu);
 	kvm_mmu_reset_context(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@ -4364,7 +4361,6 @@ struct kvm *kvm_arch_create_vm(void)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-	INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */