Diffstat (limited to 'arch/x86/kvm/mmu.c')
 -rw-r--r--  arch/x86/kvm/mmu.c | 237
 1 file changed, 114 insertions(+), 123 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2d4477c71473..2a36f7f7c4c7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -145,11 +145,20 @@ struct kvm_rmap_desc {
 	struct kvm_rmap_desc *more;
 };
 
-struct kvm_shadow_walk {
-	int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
-		     u64 addr, u64 *spte, int level);
+struct kvm_shadow_walk_iterator {
+	u64 addr;
+	hpa_t shadow_addr;
+	int level;
+	u64 *sptep;
+	unsigned index;
 };
 
+#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
+	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
+	     shadow_walk_okay(&(_walker));			\
+	     shadow_walk_next(&(_walker)))
+
+
 struct kvm_unsync_walk {
 	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
 };
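
The iterator replaces the callback-based walker above: rather than filling in
a struct kvm_shadow_walk with an entry() hook, callers now open-code the loop
body. A minimal sketch of a walk against the new API (the loop body here is
illustrative only):

	struct kvm_shadow_walk_iterator it;

	/* visit each shadow pte on the path mapping @addr, top level down */
	for_each_shadow_entry(vcpu, addr, it) {
		/* it.sptep points at the current entry, it.level at its level */
		if (is_shadow_present_pte(*it.sptep))
			continue;	/* descend to the next level */
		break;			/* stop at the first non-present entry */
	}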
@@ -343,7 +352,6 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 
 	BUG_ON(!mc->nobjs);
 	p = mc->objects[--mc->nobjs];
-	memset(p, 0, size);
 	return p;
 }
 
@@ -794,10 +802,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&sp->oos_link);
-	ASSERT(is_empty_shadow_page(sp->spt));
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	sp->multimapped = 0;
-	sp->global = 1;
 	sp->parent_pte = parent_pte;
 	--vcpu->kvm->arch.n_free_mmu_pages;
 	return sp;
@@ -983,8 +989,8 @@ struct kvm_mmu_pages {
 		idx < 512;					\
 		idx = find_next_bit(bitmap, 512, idx+1))
 
-int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
-		  int idx)
+static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
+			 int idx)
 {
 	int i;
 
@@ -1059,7 +1065,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical
+		if (sp->gfn == gfn && !sp->role.direct
 		    && !sp->role.invalid) {
 			pgprintk("%s: found role %x\n",
 				 __func__, sp->role.word);
@@ -1115,8 +1121,9 @@ struct mmu_page_path {
 		i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
 		i = mmu_pages_next(&pvec, &parents, i))
 
-int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents,
-		   int i)
+static int mmu_pages_next(struct kvm_mmu_pages *pvec,
+			  struct mmu_page_path *parents,
+			  int i)
 {
 	int n;
 
@@ -1135,7 +1142,7 @@ int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents,
 	return n;
 }
 
-void mmu_pages_clear_parents(struct mmu_page_path *parents)
+static void mmu_pages_clear_parents(struct mmu_page_path *parents)
 {
 	struct kvm_mmu_page *sp;
 	unsigned int level = 0;
@@ -1193,7 +1200,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gfn_t gfn,
 					     gva_t gaddr,
 					     unsigned level,
-					     int metaphysical,
+					     int direct,
 					     unsigned access,
 					     u64 *parent_pte)
 {
@@ -1204,10 +1211,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *tmp;
 
-	role.word = 0;
-	role.glevels = vcpu->arch.mmu.root_level;
+	role = vcpu->arch.mmu.base_role;
 	role.level = level;
-	role.metaphysical = metaphysical;
+	role.direct = direct;
 	role.access = access;
 	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
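
Seeding the role from vcpu->arch.mmu.base_role (instead of zeroing it and
setting glevels by hand) means any bit carried in the base role, such as
cr4_pge used a few hunks below, reaches every shadow page automatically.
For reference, kvm_mmu_page_role is a union over a single word, roughly
(abbreviated sketch, not the authoritative definition in kvm_host.h):

	union kvm_mmu_page_role {
		unsigned word;			/* compared as one integer */
		struct {
			unsigned glevels:4;	/* guest paging levels */
			unsigned level:4;	/* level of this shadow page */
			unsigned quadrant:2;
			unsigned direct:1;	/* was "metaphysical" */
			unsigned access:3;
			unsigned invalid:1;
			unsigned cr4_pge:1;	/* mirrored into sp->global */
			/* ... padding bits omitted ... */
		};
	};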
@@ -1242,8 +1248,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
+	sp->global = role.cr4_pge;
 	hlist_add_head(&sp->hash_link, bucket);
-	if (!metaphysical) {
+	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		account_shadowed(vcpu->kvm, gfn);
@@ -1255,35 +1262,35 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
-static int walk_shadow(struct kvm_shadow_walk *walker,
-		       struct kvm_vcpu *vcpu, u64 addr)
-{
-	hpa_t shadow_addr;
-	int level;
-	int r;
-	u64 *sptep;
-	unsigned index;
-
-	shadow_addr = vcpu->arch.mmu.root_hpa;
-	level = vcpu->arch.mmu.shadow_root_level;
-	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
-		shadow_addr &= PT64_BASE_ADDR_MASK;
-		if (!shadow_addr)
-			return 1;
-		--level;
-	}
-
-	while (level >= PT_PAGE_TABLE_LEVEL) {
-		index = SHADOW_PT_INDEX(addr, level);
-		sptep = ((u64 *)__va(shadow_addr)) + index;
-		r = walker->entry(walker, vcpu, addr, sptep, level);
-		if (r)
-			return r;
-		shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
-		--level;
-	}
-	return 0;
+static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
+			     struct kvm_vcpu *vcpu, u64 addr)
+{
+	iterator->addr = addr;
+	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
+	iterator->level = vcpu->arch.mmu.shadow_root_level;
+	if (iterator->level == PT32E_ROOT_LEVEL) {
+		iterator->shadow_addr
+			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
+		--iterator->level;
+		if (!iterator->shadow_addr)
+			iterator->level = 0;
+	}
+}
+
+static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
+{
+	if (iterator->level < PT_PAGE_TABLE_LEVEL)
+		return false;
+	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
+	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
+	return true;
+}
+
+static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+{
+	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
+	--iterator->level;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
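
Expanded, for_each_shadow_entry() from the first hunk is equivalent to the
loop below; note how shadow_walk_init() handles the unpresent-PAE-root case
by forcing level to 0, so shadow_walk_okay() fails and the body never runs,
where walk_shadow() used to return 1 early:

	shadow_walk_init(&it, vcpu, addr);
	while (shadow_walk_okay(&it)) {
		/* body: it.sptep, it.index and it.level are valid here */
		shadow_walk_next(&it);
	}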
@@ -1388,7 +1395,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm_mmu_page_unlink_children(kvm, sp);
 	kvm_mmu_unlink_parents(kvm, sp);
 	kvm_flush_remote_tlbs(kvm);
-	if (!sp->role.invalid && !sp->role.metaphysical)
+	if (!sp->role.invalid && !sp->role.direct)
 		unaccount_shadowed(kvm, sp->gfn);
 	if (sp->unsync)
 		kvm_unlink_unsync_page(kvm, sp);
@@ -1451,7 +1458,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical) {
+		if (sp->gfn == gfn && !sp->role.direct) {
 			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 				 sp->role.word);
 			r = 1;
@@ -1463,11 +1470,20 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
+	unsigned index;
+	struct hlist_head *bucket;
 	struct kvm_mmu_page *sp;
+	struct hlist_node *node, *nn;
 
-	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
-		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
-		kvm_mmu_zap_page(kvm, sp);
+	index = kvm_page_table_hashfn(gfn);
+	bucket = &kvm->arch.mmu_page_hash[index];
+	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
+		if (sp->gfn == gfn && !sp->role.direct
+		    && !sp->role.invalid) {
+			pgprintk("%s: zap %lx %x\n",
+				 __func__, gfn, sp->role.word);
+			kvm_mmu_zap_page(kvm, sp);
+		}
 	}
 }
 
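
mmu_unshadow() now makes a single pass over the hash bucket instead of
calling kvm_mmu_lookup_page() once per zapped page. The _safe iterator with
the extra nn cursor is what makes deletion during the walk legal; the same
pattern in miniature (should_zap() is a hypothetical predicate):

	struct hlist_node *node, *nn;
	struct kvm_mmu_page *sp;

	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link)
		if (should_zap(sp))
			/* may unlink and free sp; nn already points past it */
			kvm_mmu_zap_page(kvm, sp);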
@@ -1622,7 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	/* don't unsync if pagetable is shadowed with multiple roles */
 	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
-		if (s->gfn != sp->gfn || s->role.metaphysical)
+		if (s->gfn != sp->gfn || s->role.direct)
 			continue;
 		if (s->role.word != sp->role.word)
 			return 1;
@@ -1669,8 +1685,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	u64 mt_mask = shadow_mt_mask;
 	struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
 
-	if (!(vcpu->arch.cr4 & X86_CR4_PGE))
-		global = 0;
 	if (!global && sp->global) {
 		sp->global = 0;
 		if (sp->unsync) {
@@ -1777,12 +1791,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			pgprintk("hfn old %lx new %lx\n",
 				 spte_to_pfn(*shadow_pte), pfn);
 			rmap_remove(vcpu->kvm, shadow_pte);
-		} else {
-			if (largepage)
-				was_rmapped = is_large_pte(*shadow_pte);
-			else
-				was_rmapped = 1;
-		}
+		} else
+			was_rmapped = 1;
 	}
 	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
 		      dirty, largepage, global, gfn, pfn, speculative, true)) {
@@ -1820,67 +1830,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-struct direct_shadow_walk {
-	struct kvm_shadow_walk walker;
-	pfn_t pfn;
-	int write;
-	int largepage;
-	int pt_write;
-};
-
-static int direct_map_entry(struct kvm_shadow_walk *_walk,
-			    struct kvm_vcpu *vcpu,
-			    u64 addr, u64 *sptep, int level)
-{
-	struct direct_shadow_walk *walk =
-		container_of(_walk, struct direct_shadow_walk, walker);
-	struct kvm_mmu_page *sp;
-	gfn_t pseudo_gfn;
-	gfn_t gfn = addr >> PAGE_SHIFT;
-
-	if (level == PT_PAGE_TABLE_LEVEL
-	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
-		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
-			     0, walk->write, 1, &walk->pt_write,
-			     walk->largepage, 0, gfn, walk->pfn, false);
-		++vcpu->stat.pf_fixed;
-		return 1;
-	}
-
-	if (*sptep == shadow_trap_nonpresent_pte) {
-		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
-		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
-				      1, ACC_ALL, sptep);
-		if (!sp) {
-			pgprintk("nonpaging_map: ENOMEM\n");
-			kvm_release_pfn_clean(walk->pfn);
-			return -ENOMEM;
-		}
-
-		set_shadow_pte(sptep,
-			       __pa(sp->spt)
-			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-			       | shadow_user_mask | shadow_x_mask);
-	}
-	return 0;
-}
-
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int largepage, gfn_t gfn, pfn_t pfn)
-{
-	int r;
-	struct direct_shadow_walk walker = {
-		.walker = { .entry = direct_map_entry, },
-		.pfn = pfn,
-		.largepage = largepage,
-		.write = write,
-		.pt_write = 0,
-	};
-
-	r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
-	if (r < 0)
-		return r;
-	return walker.pt_write;
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+			int largepage, gfn_t gfn, pfn_t pfn)
+{
+	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_mmu_page *sp;
+	int pt_write = 0;
+	gfn_t pseudo_gfn;
+
+	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
+		if (iterator.level == PT_PAGE_TABLE_LEVEL
+		    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+				     0, write, 1, &pt_write,
+				     largepage, 0, gfn, pfn, false);
+			++vcpu->stat.pf_fixed;
+			break;
+		}
+
+		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
+			pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
+					      iterator.level - 1,
+					      1, ACC_ALL, iterator.sptep);
+			if (!sp) {
+				pgprintk("nonpaging_map: ENOMEM\n");
+				kvm_release_pfn_clean(pfn);
+				return -ENOMEM;
+			}
+
+			set_shadow_pte(iterator.sptep,
+				       __pa(sp->spt)
+				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+				       | shadow_user_mask | shadow_x_mask);
+		}
+	}
+	return pt_write;
 }
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
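
__direct_map() keeps its external contract: intermediate levels that are not
yet present get a direct shadow page, the leaf (or large-page directory)
level installs the final spte and breaks out, and pt_write is returned as
before. The caller therefore does not change; simplified from the nonpaging
fault path in this file (mmu-notifier retry and error paths trimmed):

	pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* pin the backing host page */
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);		/* make room if needed */
	r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);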
@@ -1962,7 +1947,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	int i;
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
-	int metaphysical = 0;
+	int direct = 0;
 
 	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
@@ -1971,18 +1956,18 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		if (tdp_enabled)
-			metaphysical = 1;
+			direct = 1;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				      PT64_ROOT_LEVEL, metaphysical,
+				      PT64_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.root_hpa = root;
 		return;
 	}
-	metaphysical = !is_paging(vcpu);
+	direct = !is_paging(vcpu);
 	if (tdp_enabled)
-		metaphysical = 1;
+		direct = 1;
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -1996,7 +1981,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-				      PT32_ROOT_LEVEL, metaphysical,
+				      PT32_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
@@ -2251,17 +2236,23 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 
 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
+	int r;
+
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (!is_paging(vcpu))
-		return nonpaging_init_context(vcpu);
+		r = nonpaging_init_context(vcpu);
 	else if (is_long_mode(vcpu))
-		return paging64_init_context(vcpu);
+		r = paging64_init_context(vcpu);
 	else if (is_pae(vcpu))
-		return paging32E_init_context(vcpu);
+		r = paging32E_init_context(vcpu);
 	else
-		return paging32_init_context(vcpu);
+		r = paging32_init_context(vcpu);
+
+	vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
+
+	return r;
 }
 
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
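
glevels used to be written into the role on every kvm_mmu_get_page() call;
setting it once per context in base_role is enough because shadow pages are
looked up by comparing the whole role word, so every bit must already be
populated consistently. Condensed from kvm_mmu_get_page() earlier in the
file:

	role = vcpu->arch.mmu.base_role;	/* glevels, cr4_pge, ... */
	role.level = level;
	role.direct = direct;
	role.access = access;
	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word)
			return sp;		/* reuse the existing page */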
@@ -2492,7 +2483,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-		if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
+		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
 			continue;
 		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
@@ -3130,7 +3121,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 	gfn_t gfn;
 
 	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-		if (sp->role.metaphysical)
+		if (sp->role.direct)
 			continue;
 
 		gfn = unalias_gfn(vcpu->kvm, sp->gfn);