Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c | 89
1 file changed, 37 insertions(+), 52 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2ad6f5481671..36c5406b1813 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -79,36 +79,6 @@ static int dbg = 1;
 }
 #endif
 
-#define PT64_PT_BITS 9
-#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
-#define PT32_PT_BITS 10
-#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
-
-#define PT_WRITABLE_SHIFT 1
-
-#define PT_PRESENT_MASK (1ULL << 0)
-#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
-#define PT_USER_MASK (1ULL << 2)
-#define PT_PWT_MASK (1ULL << 3)
-#define PT_PCD_MASK (1ULL << 4)
-#define PT_ACCESSED_MASK (1ULL << 5)
-#define PT_DIRTY_MASK (1ULL << 6)
-#define PT_PAGE_SIZE_MASK (1ULL << 7)
-#define PT_PAT_MASK (1ULL << 7)
-#define PT_GLOBAL_MASK (1ULL << 8)
-#define PT64_NX_SHIFT 63
-#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
-
-#define PT_PAT_SHIFT 7
-#define PT_DIR_PAT_SHIFT 12
-#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
-
-#define PT32_DIR_PSE36_SIZE 4
-#define PT32_DIR_PSE36_SHIFT 13
-#define PT32_DIR_PSE36_MASK \
-	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
-
-
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 
@@ -154,10 +124,6 @@ static int dbg = 1;
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_FETCH_MASK (1U << 4)
 
-#define PT64_ROOT_LEVEL 4
-#define PT32_ROOT_LEVEL 2
-#define PT32E_ROOT_LEVEL 3
-
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
 
@@ -186,6 +152,12 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
+static u64 __read_mostly shadow_base_present_pte;
+static u64 __read_mostly shadow_nx_mask;
+static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
+static u64 __read_mostly shadow_user_mask;
+static u64 __read_mostly shadow_accessed_mask;
+static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -194,6 +166,23 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
+void kvm_mmu_set_base_ptes(u64 base_pte)
+{
+	shadow_base_present_pte = base_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
+
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+{
+	shadow_user_mask = user_mask;
+	shadow_accessed_mask = accessed_mask;
+	shadow_dirty_mask = dirty_mask;
+	shadow_nx_mask = nx_mask;
+	shadow_x_mask = x_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.cr0 & X86_CR0_WP;
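
Note: the two setters added above let architecture code program the shadow PTE bit layout instead of mmu.c hard-coding the x86 bits removed in the first hunk. A minimal caller sketch, assuming the classic x86 bit positions from the removed PT_* defines; the actual call site and the values chosen by the surrounding series are not shown in this diff:

	/* Hypothetical init-time call, mirroring the removed PT_* layout. */
	kvm_mmu_set_base_ptes(1ULL << 0);		/* present bit */
	kvm_mmu_set_mask_ptes(1ULL << 2,		/* user */
			      1ULL << 5,		/* accessed */
			      1ULL << 6,		/* dirty */
			      1ULL << 63,		/* nx */
			      0);			/* no separate execute bit */

A TDP implementation with a different PTE format (for example one with an execute-permission bit rather than NX) could pass its own masks here, which is presumably why shadow_x_mask and shadow_nx_mask are kept mutually exclusive.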
@@ -232,7 +221,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-	return pte & PT_DIRTY_MASK;
+	return pte & shadow_dirty_mask;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -387,7 +376,6 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 
 	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
 	*write_count += 1;
-	WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -547,7 +535,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
-	if (*spte & PT_ACCESSED_MASK)
+	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
 		kvm_release_pfn_dirty(pfn);
@@ -1073,17 +1061,17 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	spte = shadow_base_present_pte | shadow_dirty_mask;
 	if (!speculative)
 		pte_access |= PT_ACCESSED_MASK;
 	if (!dirty)
 		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
-
-	spte |= PT_PRESENT_MASK;
+	if (pte_access & ACC_EXEC_MASK)
+		spte |= shadow_x_mask;
+	else
+		spte |= shadow_nx_mask;
 	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
+		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
 
@@ -1188,8 +1176,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			return -ENOMEM;
 		}
 
-		table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-			| PT_WRITABLE_MASK | PT_USER_MASK;
+		table[index] = __pa(new_table->spt)
+			| PT_PRESENT_MASK | PT_WRITABLE_MASK
+			| shadow_user_mask | shadow_x_mask;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -1244,7 +1233,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 	spin_lock(&vcpu->kvm->mmu_lock);
-#ifdef CONFIG_X86_64
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
@@ -1256,7 +1244,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		return;
 	}
-#endif
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -1282,7 +1269,6 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
-#ifdef CONFIG_X86_64
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
@@ -1297,7 +1283,6 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		vcpu->arch.mmu.root_hpa = root;
 		return;
 	}
-#endif
 	metaphysical = !is_paging(vcpu);
 	if (tdp_enabled)
 		metaphysical = 1;
@@ -1377,7 +1362,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 largepage, gfn, pfn, TDP_ROOT_LEVEL);
+			 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
@@ -1484,7 +1469,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->page_fault = tdp_page_fault;
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
-	context->shadow_root_level = TDP_ROOT_LEVEL;
+	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
 
 	if (!is_paging(vcpu)) {
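
Note: with the fixed TDP_ROOT_LEVEL constant gone, the two-dimensional-paging depth now comes from a per-vendor kvm_x86_ops callback. A minimal sketch of what such a hook could look like, assuming a fixed 4-level structure; the function name is made up here and the vendor-side implementation is not part of this diff:

	/* Hypothetical vendor hook; a real implementation might pick the
	 * level based on host capabilities. */
	static int example_get_tdp_level(void)
	{
		return 4;	/* 4-level paging structure */
	}

Returning the level through the ops table also lets the same mmu_free_roots/mmu_alloc_roots 4-level path above work without the CONFIG_X86_64 guards removed in the following hunks.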
@@ -1633,7 +1618,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->arch.last_pte_updated;
 
-	return !!(spte && (*spte & PT_ACCESSED_MASK));
+	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,