Diffstat (limited to 'arch/x86/kvm/mmu.c')
 -rw-r--r--  arch/x86/kvm/mmu.c | 50
 1 files changed, 6 insertions, 44 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b582adde6830..b39ec626040e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1075,18 +1075,10 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	return NULL;
 }
 
-static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	list_del(&sp->oos_link);
-	--kvm->stat.mmu_unsync_global;
-}
-
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	WARN_ON(!sp->unsync);
 	sp->unsync = 0;
-	if (sp->global)
-		kvm_unlink_unsync_global(kvm, sp);
 	--kvm->stat.mmu_unsync;
 }
 
@@ -1249,7 +1241,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
-	sp->global = 0;
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
@@ -1647,11 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	++vcpu->kvm->stat.mmu_unsync;
 	sp->unsync = 1;
 
-	if (sp->global) {
-		list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
-		++vcpu->kvm->stat.mmu_unsync_global;
-	} else
-		kvm_mmu_mark_parents_unsync(vcpu, sp);
+	kvm_mmu_mark_parents_unsync(vcpu, sp);
 
 	mmu_convert_notrap(sp);
 	return 0;
@@ -1678,21 +1665,12 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
-		    int global, gfn_t gfn, pfn_t pfn, bool speculative,
+		    gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync)
 {
 	u64 spte;
 	int ret = 0;
 	u64 mt_mask = shadow_mt_mask;
-	struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
-
-	if (!global && sp->global) {
-		sp->global = 0;
-		if (sp->unsync) {
-			kvm_unlink_unsync_global(vcpu->kvm, sp);
-			kvm_mmu_mark_parents_unsync(vcpu, sp);
-		}
-	}
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
@@ -1766,8 +1744,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
-			 int *ptwrite, int largepage, int global,
-			 gfn_t gfn, pfn_t pfn, bool speculative)
+			 int *ptwrite, int largepage, gfn_t gfn,
+			 pfn_t pfn, bool speculative)
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1796,7 +1774,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		was_rmapped = 1;
 	}
 	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-		      dirty, largepage, global, gfn, pfn, speculative, true)) {
+		      dirty, largepage, gfn, pfn, speculative, true)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
@@ -1844,7 +1822,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
 				     0, write, 1, &pt_write,
-				     largepage, 0, gfn, pfn, false);
+				     largepage, gfn, pfn, false);
 			++vcpu->stat.pf_fixed;
 			break;
 		}
@@ -2015,15 +1993,6 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = vcpu->kvm;
-	struct kvm_mmu_page *sp, *n;
-
-	list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
-		kvm_sync_page(vcpu, sp);
-}
-
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -2031,13 +2000,6 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-	spin_lock(&vcpu->kvm->mmu_lock);
-	mmu_sync_global(vcpu);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	return vaddr;