author	Marcelo Tosatti <mtosatti@redhat.com>	2008-12-01 19:32:04 -0500
committer	Avi Kivity <avi@redhat.com>	2008-12-31 09:55:44 -0500
commit	6cffe8ca4a2adf1ac5003d9cad08fe4434d6eee0 (patch)
tree	9ee09235ed1dc1f26c8988557ddb2fb82cdb2b05 /arch/x86/kvm
parent	b1a368218ad5b6e62380c8f206f16e6f18bf154c (diff)
KVM: MMU: skip global pgtables on sync due to cr3 switch
Skip syncing global pages on cr3 switch (but not on cr4/cr0). This is
important for Linux 32-bit guests with PAE, where the kmap page is
marked as global.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
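The patch relies on the x86 global bit in guest PTEs: translations marked global (such as the kmap fixmap entry in a 32-bit PAE guest) are not flushed from the TLB by a cr3 write, so their shadow pages need not be re-synced on a guest cr3 switch either; they only need syncing when cr0/cr4 change. Below is a minimal userspace sketch of that predicate, illustrative only and not part of the patch; the helper names are hypothetical and the PT_GLOBAL_MASK value mirrors the x86 PTE global bit (bit 8) that the patch tests via gpte & PT_GLOBAL_MASK.

/*
 * Illustrative userspace sketch (not kernel code): decide whether a guest
 * PTE's shadow state must be re-synced on a cr3 switch.  Global pages
 * survive cr3 reloads in the TLB, so their sync can be deferred to
 * cr0/cr4 writes.  PT_GLOBAL_MASK matches the x86 PTE global bit (bit 8);
 * the helper names and sample pte values are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_GLOBAL_MASK  (1ULL << 8)

/* A cr3 switch only invalidates non-global translations. */
static bool needs_sync_on_cr3_switch(uint64_t gpte)
{
	if (!(gpte & PT_PRESENT_MASK))
		return false;
	return !(gpte & PT_GLOBAL_MASK);
}

/* A cr0/cr4 write flushes global entries too, so every present pte syncs. */
static bool needs_sync_on_cr0_cr4_write(uint64_t gpte)
{
	return gpte & PT_PRESENT_MASK;
}

int main(void)
{
	uint64_t kmap_pte   = 0x12345000ULL | PT_PRESENT_MASK | PT_GLOBAL_MASK;
	uint64_t normal_pte = 0x6789a000ULL | PT_PRESENT_MASK;

	printf("kmap (global) pte: cr3 sync=%d  cr0/cr4 sync=%d\n",
	       needs_sync_on_cr3_switch(kmap_pte),
	       needs_sync_on_cr0_cr4_write(kmap_pte));
	printf("normal pte:        cr3 sync=%d  cr0/cr4 sync=%d\n",
	       needs_sync_on_cr3_switch(normal_pte),
	       needs_sync_on_cr0_cr4_write(normal_pte));
	return 0;
}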
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	53
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	10
-rw-r--r--	arch/x86/kvm/x86.c	4
3 files changed, 57 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 58c35dead321..cbac9e4b156f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -793,9 +793,11 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+	INIT_LIST_HEAD(&sp->oos_link);
 	ASSERT(is_empty_shadow_page(sp->spt));
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	sp->multimapped = 0;
+	sp->global = 1;
 	sp->parent_pte = parent_pte;
 	--vcpu->kvm->arch.n_free_mmu_pages;
 	return sp;
@@ -1066,10 +1068,18 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	return NULL;
 }
 
+static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	list_del(&sp->oos_link);
+	--kvm->stat.mmu_unsync_global;
+}
+
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	WARN_ON(!sp->unsync);
 	sp->unsync = 0;
+	if (sp->global)
+		kvm_unlink_unsync_global(kvm, sp);
 	--kvm->stat.mmu_unsync;
 }
 
@@ -1615,9 +1625,15 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		if (s->role.word != sp->role.word)
 			return 1;
 	}
-	kvm_mmu_mark_parents_unsync(vcpu, sp);
 	++vcpu->kvm->stat.mmu_unsync;
 	sp->unsync = 1;
+
+	if (sp->global) {
+		list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
+		++vcpu->kvm->stat.mmu_unsync_global;
+	} else
+		kvm_mmu_mark_parents_unsync(vcpu, sp);
+
 	mmu_convert_notrap(sp);
 	return 0;
 }
@@ -1643,12 +1659,21 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
-		    gfn_t gfn, pfn_t pfn, bool speculative,
+		    int global, gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync)
 {
 	u64 spte;
 	int ret = 0;
 	u64 mt_mask = shadow_mt_mask;
+	struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
+
+	if (!global && sp->global) {
+		sp->global = 0;
+		if (sp->unsync) {
+			kvm_unlink_unsync_global(vcpu->kvm, sp);
+			kvm_mmu_mark_parents_unsync(vcpu, sp);
+		}
+	}
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
@@ -1717,8 +1742,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
-			 int *ptwrite, int largepage, gfn_t gfn,
-			 pfn_t pfn, bool speculative)
+			 int *ptwrite, int largepage, int global,
+			 gfn_t gfn, pfn_t pfn, bool speculative)
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1751,7 +1776,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		}
 	}
 	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-		      dirty, largepage, gfn, pfn, speculative, true)) {
+		      dirty, largepage, global, gfn, pfn, speculative, true)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
@@ -1808,7 +1833,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk,
 	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
 		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
 			     0, walk->write, 1, &walk->pt_write,
-			     walk->largepage, gfn, walk->pfn, false);
+			     walk->largepage, 0, gfn, walk->pfn, false);
 		++vcpu->stat.pf_fixed;
 		return 1;
 	}
@@ -1995,6 +2020,15 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void mmu_sync_global(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_mmu_page *sp, *n;
+
+	list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
+		kvm_sync_page(vcpu, sp);
+}
+
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -2002,6 +2036,13 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
+void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->kvm->mmu_lock);
+	mmu_sync_global(vcpu);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	return vaddr;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 84eee43bbe74..e644d81979b6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -274,7 +274,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 		return;
 	kvm_get_pfn(pfn);
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
-		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
+		     gpte & PT_DIRTY_MASK, NULL, largepage,
+		     gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
 		     pfn, true);
 }
 
@@ -301,8 +302,9 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
 		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
 			     sw->user_fault, sw->write_fault,
 			     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
-			     sw->ptwrite, sw->largepage, gw->gfn, sw->pfn,
-			     false);
+			     sw->ptwrite, sw->largepage,
+			     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
+			     gw->gfn, sw->pfn, false);
 		sw->sptep = sptep;
 		return 1;
 	}
@@ -580,7 +582,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
-			 is_dirty_pte(gpte), 0, gfn,
+			 is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false);
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7a2aeba0bfbd..774db00d2db6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -104,6 +104,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
104 { "mmu_recycled", VM_STAT(mmu_recycled) }, 104 { "mmu_recycled", VM_STAT(mmu_recycled) },
105 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, 105 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
106 { "mmu_unsync", VM_STAT(mmu_unsync) }, 106 { "mmu_unsync", VM_STAT(mmu_unsync) },
107 { "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
107 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, 108 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
108 { "largepages", VM_STAT(lpages) }, 109 { "largepages", VM_STAT(lpages) },
109 { NULL } 110 { NULL }
@@ -315,6 +316,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 	vcpu->arch.cr0 = cr0;
 
+	kvm_mmu_sync_global(vcpu);
 	kvm_mmu_reset_context(vcpu);
 	return;
 }
@@ -358,6 +360,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
 	vcpu->arch.cr4 = cr4;
+	kvm_mmu_sync_global(vcpu);
 	kvm_mmu_reset_context(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@ -4113,6 +4116,7 @@ struct kvm *kvm_arch_create_vm(void)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+	INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */