author    Marcelo Tosatti <mtosatti@redhat.com>    2008-12-01 19:32:04 -0500
committer Avi Kivity <avi@redhat.com>              2008-12-31 09:55:44 -0500
commit    6cffe8ca4a2adf1ac5003d9cad08fe4434d6eee0 (patch)
tree      9ee09235ed1dc1f26c8988557ddb2fb82cdb2b05 /arch/x86/kvm/mmu.c
parent    b1a368218ad5b6e62380c8f206f16e6f18bf154c (diff)
KVM: MMU: skip global pgtables on sync due to cr3 switch
Skip syncing global pages on cr3 switch (but not on cr4/cr0). This is important for Linux 32-bit guests with PAE, where the kmap page is marked as global.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  53
1 file changed, 47 insertions(+), 6 deletions(-)
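The diff below is limited to mmu.c, so the call sites that enforce the "resync on cr4/cr0, skip on cr3" policy from the commit message live in a companion change that is not shown here. As a rough, hypothetical sketch of that policy (the helper name and its placement are assumptions, not code from this commit; it only illustrates when the new kvm_mmu_sync_global() entry point, added at the end of this diff, would be called):

/* Hypothetical illustration only -- not part of this commit. */
static void example_after_control_reg_write(struct kvm_vcpu *vcpu,
                                            bool cr0_or_cr4_changed)
{
        if (cr0_or_cr4_changed)
                kvm_mmu_sync_global(vcpu);      /* resync global shadow pages */
        /*
         * A plain cr3 switch takes no extra action here: the existing
         * unsync machinery handles non-global pages, and pages still
         * marked sp->global are deliberately left out of sync.
         */
}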
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 58c35dead32..cbac9e4b156 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -793,9 +793,11 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+	INIT_LIST_HEAD(&sp->oos_link);
 	ASSERT(is_empty_shadow_page(sp->spt));
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	sp->multimapped = 0;
+	sp->global = 1;
 	sp->parent_pte = parent_pte;
 	--vcpu->kvm->arch.n_free_mmu_pages;
 	return sp;
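The two assignments added above rely on fields that are not defined anywhere in this mmu.c-only view; they come from a companion header change. A minimal sketch of the state being assumed (the standalone struct names below are hypothetical and exist only for illustration; in the real tree the fields would sit in struct kvm_mmu_page and struct kvm_arch):

#include <linux/list.h>

/* Hypothetical stand-ins for the per-page and per-VM state this hunk assumes. */
struct example_shadow_page_state {
        int global;                     /* set at allocation; cleared by set_spte()
                                           once a non-global guest pte lands here */
        struct list_head oos_link;      /* entry on the per-VM list of unsync
                                           global pages while sp->unsync is set */
};

struct example_vm_state {
        struct list_head oos_global_pages;      /* walked by mmu_sync_global() */
};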
@@ -1066,10 +1068,18 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	return NULL;
 }
 
+static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	list_del(&sp->oos_link);
+	--kvm->stat.mmu_unsync_global;
+}
+
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	WARN_ON(!sp->unsync);
 	sp->unsync = 0;
+	if (sp->global)
+		kvm_unlink_unsync_global(kvm, sp);
 	--kvm->stat.mmu_unsync;
 }
 
@@ -1615,9 +1625,15 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		if (s->role.word != sp->role.word)
 			return 1;
 	}
-	kvm_mmu_mark_parents_unsync(vcpu, sp);
 	++vcpu->kvm->stat.mmu_unsync;
 	sp->unsync = 1;
+
+	if (sp->global) {
+		list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
+		++vcpu->kvm->stat.mmu_unsync_global;
+	} else
+		kvm_mmu_mark_parents_unsync(vcpu, sp);
+
 	mmu_convert_notrap(sp);
 	return 0;
 }
@@ -1643,12 +1659,21 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
-		    gfn_t gfn, pfn_t pfn, bool speculative,
+		    int global, gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync)
 {
 	u64 spte;
 	int ret = 0;
 	u64 mt_mask = shadow_mt_mask;
+	struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
+
+	if (!global && sp->global) {
+		sp->global = 0;
+		if (sp->unsync) {
+			kvm_unlink_unsync_global(vcpu->kvm, sp);
+			kvm_mmu_mark_parents_unsync(vcpu, sp);
+		}
+	}
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
@@ -1717,8 +1742,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
-			 int *ptwrite, int largepage, gfn_t gfn,
-			 pfn_t pfn, bool speculative)
+			 int *ptwrite, int largepage, int global,
+			 gfn_t gfn, pfn_t pfn, bool speculative)
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
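The new global argument ultimately has to come from the guest pte's G bit. The shadow-paging fault path (paging_tmpl.h) is outside this diff, so the following is only an assumption about how such a caller would derive the value; the mask macro and helper below are illustrative, not the kernel's own definitions:

#define EXAMPLE_PT_GLOBAL_MASK (1ULL << 8)      /* x86 PTE G bit */

/* Sketch: nonzero when the guest pte is marked global. */
static inline int example_gpte_global(u64 gpte)
{
        return (gpte & EXAMPLE_PT_GLOBAL_MASK) != 0;
}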
@@ -1751,7 +1776,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		}
 	}
 	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-		      dirty, largepage, gfn, pfn, speculative, true)) {
+		      dirty, largepage, global, gfn, pfn, speculative, true)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
@@ -1808,7 +1833,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk,
 	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
 		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
 			     0, walk->write, 1, &walk->pt_write,
-			     walk->largepage, gfn, walk->pfn, false);
+			     walk->largepage, 0, gfn, walk->pfn, false);
 		++vcpu->stat.pf_fixed;
 		return 1;
 	}
@@ -1995,6 +2020,15 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void mmu_sync_global(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_mmu_page *sp, *n;
+
+	list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
+		kvm_sync_page(vcpu, sp);
+}
+
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -2002,6 +2036,13 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
+void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->kvm->mmu_lock);
+	mmu_sync_global(vcpu);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	return vaddr;
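A note on the _safe iterator in mmu_sync_global(): kvm_sync_page() ends up unlinking the page from the very list being walked (kvm_unlink_unsync_page() -> kvm_unlink_unsync_global() -> list_del(&sp->oos_link)), so the next pointer must be cached before each entry is processed. A minimal, self-contained sketch of that pattern with illustrative names:

#include <linux/list.h>
#include <linux/slab.h>

struct example_node {
        struct list_head link;
};

/*
 * Drain a list whose entries are removed while iterating:
 * list_for_each_entry_safe() samples the next pointer before the body
 * runs, so deleting (and even freeing) the current entry is safe.
 */
static void example_drain(struct list_head *head)
{
        struct example_node *pos, *next;

        list_for_each_entry_safe(pos, next, head, link) {
                list_del(&pos->link);
                kfree(pos);
        }
}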