Diffstat (limited to 'drivers/kvm/mmu.c')
 drivers/kvm/mmu.c | 103
 1 file changed, 41 insertions(+), 62 deletions(-)
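In short: this patch decouples shadow-MMU teardown from any particular vcpu. The free helpers (mmu_free_pte_chain(), mmu_free_rmap_desc(), kvm_mmu_free_page()) stop returning objects to the per-vcpu memory caches and kfree() them directly, which in turn lets rmap_remove(), mmu_page_remove_parent_pte(), kvm_mmu_zap_page() and friends drop their struct kvm_vcpu * parameter; the zap paths take a struct kvm * instead, so kvm_mmu_zap_all() and kvm_mmu_slot_remove_write_access() can run without a vcpu in hand.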
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1199d3f32ac3..48d28f1ff4a1 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -281,24 +281,15 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 	return p;
 }
 
-static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
-{
-	if (mc->nobjs < KVM_NR_MEM_OBJS)
-		mc->objects[mc->nobjs++] = obj;
-	else
-		kfree(obj);
-}
-
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
 	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
 				      sizeof(struct kvm_pte_chain));
 }
 
-static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
-			       struct kvm_pte_chain *pc)
+static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 {
-	mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
+	kfree(pc);
 }
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
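Why the bare kfree() in mmu_free_pte_chain() is safe: as the removed mmu_memory_cache_free() shows, cached objects were ordinary kmalloc()-backed allocations all along; the per-vcpu cache only pre-reserves them so the fault path never has to allocate. A minimal sketch of that pattern, with hypothetical helper names (not the exact mmu.c code):

static int cache_topup(struct kvm_mmu_memory_cache *mc, size_t size)
{
	/* Pre-reserve objects while sleeping is still allowed. */
	while (mc->nobjs < KVM_NR_MEM_OBJS) {
		void *obj = kmalloc(size, GFP_KERNEL);

		if (!obj)
			return -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Freeing can therefore bypass the cache entirely: */
static void cache_obj_free(void *obj)
{
	kfree(obj);	/* valid: every cached object came from kmalloc() */
}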
@@ -307,10 +298,9 @@ static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 				      sizeof(struct kvm_rmap_desc));
 }
 
-static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
-			       struct kvm_rmap_desc *rd)
+static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 {
-	mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
+	kfree(rd);
 }
 
 /*
@@ -355,8 +345,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
 	}
 }
 
-static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
-				   struct page *page,
+static void rmap_desc_remove_entry(struct page *page,
 				   struct kvm_rmap_desc *desc,
 				   int i,
 				   struct kvm_rmap_desc *prev_desc)
@@ -376,10 +365,10 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
 		prev_desc->more = desc->more;
 	else
 		set_page_private(page,(unsigned long)desc->more | 1);
-	mmu_free_rmap_desc(vcpu, desc);
+	mmu_free_rmap_desc(desc);
 }
 
-static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
+static void rmap_remove(u64 *spte)
 {
 	struct page *page;
 	struct kvm_rmap_desc *desc;
@@ -407,7 +396,7 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
 		while (desc) {
 			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
 				if (desc->shadow_ptes[i] == spte) {
-					rmap_desc_remove_entry(vcpu, page,
+					rmap_desc_remove_entry(page,
 							       desc, i,
 							       prev_desc);
 					return;
@@ -442,7 +431,7 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		rmap_remove(vcpu, spte);
+		rmap_remove(spte);
 		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	}
@@ -464,14 +453,14 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_free_page(struct kvm *kvm,
 			      struct kvm_mmu_page *page_head)
 {
 	ASSERT(is_empty_shadow_page(page_head->spt));
 	list_del(&page_head->link);
-	mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
-	mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
-	++vcpu->kvm->n_free_mmu_pages;
+	kfree(page_head->spt);
+	kfree(page_head);
+	++kvm->n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
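Note the asymmetry this leaves behind: allocation still goes through the per-vcpu caches, only freeing is now direct. The matching alloc side (kvm_mmu_alloc_page(), not shown in this diff) looks roughly like the sketch below; treat it as an assumption about this era's mmu.c rather than a quote:

	/* Sketch of the matching allocation side (assumed shape): */
	page_head = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
					   sizeof(*page_head));
	page_head->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache,
						PAGE_SIZE);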
@@ -537,8 +526,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 	pte_chain->parent_ptes[0] = parent_pte;
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
-				       struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 				       u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
@@ -565,7 +553,7 @@ static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
 			pte_chain->parent_ptes[i] = NULL;
 			if (i == 0) {
 				hlist_del(&pte_chain->link);
-				mmu_free_pte_chain(vcpu, pte_chain);
+				mmu_free_pte_chain(pte_chain);
 				if (hlist_empty(&page->parent_ptes)) {
 					page->multimapped = 0;
 					page->parent_pte = NULL;
@@ -643,7 +631,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return page;
 }
 
-static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 					 struct kvm_mmu_page *page)
 {
 	unsigned i;
@@ -655,10 +643,10 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 			if (pt[i] & PT_PRESENT_MASK)
-				rmap_remove(vcpu, &pt[i]);
+				rmap_remove(&pt[i]);
 			pt[i] = 0;
 		}
-		kvm_flush_remote_tlbs(vcpu->kvm);
+		kvm_flush_remote_tlbs(kvm);
 		return;
 	}
 
@@ -669,19 +657,18 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 		if (!(ent & PT_PRESENT_MASK))
 			continue;
 		ent &= PT64_BASE_ADDR_MASK;
-		mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
+		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
 	}
-	kvm_flush_remote_tlbs(vcpu->kvm);
+	kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
-			     struct kvm_mmu_page *page,
+static void kvm_mmu_put_page(struct kvm_mmu_page *page,
 			     u64 *parent_pte)
 {
-	mmu_page_remove_parent_pte(vcpu, page, parent_pte);
+	mmu_page_remove_parent_pte(page, parent_pte);
 }
 
-static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_zap_page(struct kvm *kvm,
 			     struct kvm_mmu_page *page)
 {
 	u64 *parent_pte;
@@ -697,15 +684,15 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
 			parent_pte = chain->parent_ptes[0];
 		}
 		BUG_ON(!parent_pte);
-		kvm_mmu_put_page(vcpu, page, parent_pte);
+		kvm_mmu_put_page(page, parent_pte);
 		set_shadow_pte(parent_pte, 0);
 	}
-	kvm_mmu_page_unlink_children(vcpu, page);
+	kvm_mmu_page_unlink_children(kvm, page);
 	if (!page->root_count) {
 		hlist_del(&page->hash_link);
-		kvm_mmu_free_page(vcpu, page);
+		kvm_mmu_free_page(kvm, page);
 	} else
-		list_move(&page->link, &vcpu->kvm->active_mmu_pages);
+		list_move(&page->link, &kvm->active_mmu_pages);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
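With these changes kvm_mmu_zap_page() needs only the struct kvm: it walks the parent-PTE chains, clearing every shadow PTE that points at the page, unlinks the children, and then either frees the page outright or, if it is still in use as a root (page->root_count != 0), parks it back on kvm->active_mmu_pages.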
@@ -724,7 +711,7 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
 				 page->role.word);
-			kvm_mmu_zap_page(vcpu, page);
+			kvm_mmu_zap_page(vcpu->kvm, page);
 			r = 1;
 		}
 	return r;
@@ -737,7 +724,7 @@ static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
 	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
 		pgprintk("%s: zap %lx %x\n",
 			 __FUNCTION__, gfn, page->role.word);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 }
 
@@ -1089,10 +1076,10 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 	pte = *spte;
 	if (is_present_pte(pte)) {
 		if (page->role.level == PT_PAGE_TABLE_LEVEL)
-			rmap_remove(vcpu, spte);
+			rmap_remove(spte);
 		else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
-			mmu_page_remove_parent_pte(vcpu, child, spte);
+			mmu_page_remove_parent_pte(child, spte);
 		}
 	}
 	*spte = 0;
@@ -1161,7 +1148,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, page->role.word);
-			kvm_mmu_zap_page(vcpu, page);
+			kvm_mmu_zap_page(vcpu->kvm, page);
 			continue;
 		}
 		page_offset = offset;
@@ -1207,7 +1194,7 @@ void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 		page = container_of(vcpu->kvm->active_mmu_pages.prev,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
@@ -1219,7 +1206,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
 		page = container_of(vcpu->kvm->active_mmu_pages.next,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 	free_page((unsigned long)vcpu->mmu.pae_root);
 }
@@ -1277,9 +1264,8 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 	mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
-	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_page *page;
 
 	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
@@ -1293,27 +1279,20 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (pt[i] & PT_WRITABLE_MASK) {
-				rmap_remove(vcpu, &pt[i]);
+				rmap_remove(&pt[i]);
 				pt[i] &= ~PT_WRITABLE_MASK;
 			}
 	}
 }
 
-void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
+void kvm_mmu_zap_all(struct kvm *kvm)
 {
-	destroy_kvm_mmu(vcpu);
+	struct kvm_mmu_page *page, *node;
 
-	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		struct kvm_mmu_page *page;
-
-		page = container_of(vcpu->kvm->active_mmu_pages.next,
-				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
-	}
+	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+		kvm_mmu_zap_page(kvm, page);
 
-	mmu_free_memory_caches(vcpu);
-	kvm_flush_remote_tlbs(vcpu->kvm);
-	init_kvm_mmu(vcpu);
+	kvm_flush_remote_tlbs(kvm);
 }
 
 void kvm_mmu_module_exit(void)
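The switch to list_for_each_entry_safe() in kvm_mmu_zap_all() is what makes the simpler loop correct: kvm_mmu_zap_page() either list_del()s the page (via kvm_mmu_free_page()) or list_move()s it, so a plain list_for_each_entry() would chase a ->next pointer out of a freed or relinked node. The _safe variant caches the next entry before running the body; from include/linux/list.h (quoted from memory, so verify against your tree):

#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))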