Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  47
1 file changed, 37 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ba119dae890e..07673487fd5d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1220,6 +1220,35 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	return __kvm_sync_page(vcpu, sp, true);
 }
 
+/* @gfn should be write-protected at the call site */
+static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	struct hlist_head *bucket;
+	struct kvm_mmu_page *s;
+	struct hlist_node *node, *n;
+	unsigned index;
+	bool flush = false;
+
+	index = kvm_page_table_hashfn(gfn);
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+		if (s->gfn != gfn || !s->unsync || s->role.invalid)
+			continue;
+
+		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
+			(vcpu->arch.mmu.sync_page(vcpu, s))) {
+			kvm_mmu_zap_page(vcpu->kvm, s);
+			continue;
+		}
+		kvm_unlink_unsync_page(vcpu->kvm, s);
+		flush = true;
+	}
+
+	if (flush)
+		kvm_mmu_flush_tlb(vcpu);
+}
+
 struct mmu_page_path {
 	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
 	unsigned int idx[PT64_ROOT_LEVEL-1];
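Two details of the new helper are easy to misread. The walk uses the _safe list iterator because kvm_mmu_zap_page() unlinks the current entry from the hash bucket mid-walk, and the role check compares a one-bit field against is_pae() only after normalizing it with "!!". The sketch below is a minimal, self-contained C illustration of both patterns; the list layout, the is_pae() stand-in, and all field names are invented for the example and are not KVM's actual definitions.

#include <stdio.h>
#include <stdlib.h>

struct page {
	unsigned long gfn;
	unsigned unsync  : 1;
	unsigned cr4_pae : 1;	/* one-bit field: can only hold 0 or 1 */
	struct page *next;
};

/* Stand-in for is_pae(vcpu): "true", but deliberately not equal to 1. */
static int is_pae(void)
{
	return 0x20;
}

static void sync_bucket(struct page **bucket, unsigned long gfn)
{
	struct page **link = bucket, *p, *next;
	int flush = 0;

	for (p = *bucket; p; p = next) {
		next = p->next;		/* cached up front, so freeing p is safe */
		if (p->gfn != gfn || !p->unsync) {
			link = &p->next;
			continue;
		}
		/*
		 * Without "!!" this would read 1 != 0x20 and every matching
		 * page would be zapped; "!!" collapses non-zero to exactly 1.
		 */
		if (p->cr4_pae != !!is_pae()) {
			*link = next;	/* "zap": unlink and free mid-walk */
			free(p);
			continue;
		}
		p->unsync = 0;		/* like kvm_unlink_unsync_page() */
		link = &p->next;
		flush = 1;
	}
	if (flush)
		printf("one TLB flush after the whole walk\n");
}

int main(void)
{
	struct page *bucket = NULL;

	for (int i = 0; i < 3; i++) {
		struct page *p = calloc(1, sizeof(*p));
		p->gfn = 42;
		p->unsync = 1;
		p->cr4_pae = (i != 1);	/* one role mismatch gets zapped */
		p->next = bucket;
		bucket = p;
	}
	sync_bucket(&bucket, 42);

	while (bucket) {		/* tear down what survived */
		struct page *p = bucket;
		bucket = p->next;
		free(p);
	}
	return 0;
}

The single flush after the walk mirrors the flush latch in kvm_sync_pages(): syncing several pages for one gfn costs at most one TLB flush, not one per page.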
@@ -1318,8 +1347,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	unsigned index;
 	unsigned quadrant;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *sp, *unsync_sp = NULL;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *tmp;
+	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
 	role.level = level;
@@ -1336,17 +1366,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
 		if (sp->gfn == gfn) {
-			if (sp->unsync)
-				unsync_sp = sp;
+			if (!need_sync && sp->unsync)
+				need_sync = true;
 
 			if (sp->role.word != role.word)
 				continue;
 
-			if (!direct && unsync_sp &&
-			      kvm_sync_page_transient(vcpu, unsync_sp)) {
-				unsync_sp = NULL;
+			if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
 				break;
-			}
 
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			if (sp->unsync_children) {
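The early `sp->role.word != role.word` test in this loop works because KVM overlays the page-role bitfields on a single integer inside a union, so a full role comparison is one word compare. A rough, self-contained illustration; the field names and widths below are abbreviated for the example and are not the kernel's exact kvm_mmu_page_role layout:

#include <stdio.h>

/* Loosely modeled on kvm_mmu_page_role: bitfields overlaid on one word. */
union page_role {
	unsigned word;
	struct {
		unsigned level    : 4;
		unsigned cr4_pae  : 1;
		unsigned quadrant : 2;
		unsigned direct   : 1;
		unsigned invalid  : 1;
	};
};

int main(void)
{
	union page_role a = { .word = 0 }, b = { .word = 0 };

	a.level = 2; a.cr4_pae = 1;
	b.level = 2; b.cr4_pae = 1;
	printf("match: %d\n", a.word == b.word);	/* 1: roles identical */

	b.quadrant = 1;
	printf("match: %d\n", a.word == b.word);	/* 0: one compare catches any field */
	return 0;
}

This is also why the loop can simply `continue` past same-gfn pages whose role differs (level, quadrant, PAE-ness) without decoding individual fields.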
@@ -1358,9 +1385,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			trace_kvm_mmu_get_page(sp, false);
 			return sp;
 		}
-	if (!direct && unsync_sp)
-		kvm_sync_page(vcpu, unsync_sp);
-
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
@@ -1371,6 +1395,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
+		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+			kvm_sync_pages(vcpu, gfn);
+
 		account_shadowed(vcpu->kvm, gfn);
 	}
 	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
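Taken together, the hunks move synchronization out of the lookup loop: kvm_mmu_get_page() no longer carries a single unsync_sp to sync after the scan, it only latches need_sync when it sees any unsync page for the gfn. Once a new indirect shadow page above PT_PAGE_TABLE_LEVEL has been allocated and the gfn write-protected, kvm_sync_pages() resynchronizes every unsync page for that gfn in one pass, zapping the ones that fail to sync and paying at most one TLB flush.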