Diffstat (limited to 'drivers/kvm/mmu.c')
 -rw-r--r--  drivers/kvm/mmu.c | 140
 1 file changed, 66 insertions(+), 74 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1199d3f32ac3..1a87ba9d5156 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -154,7 +154,6 @@ struct kvm_rmap_desc {
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
-static struct kmem_cache *mmu_page_cache;
 static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
@@ -225,6 +224,29 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 		kfree(mc->objects[--mc->nobjs]);
 }
 
+static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
+				       int min, gfp_t gfp_flags)
+{
+	struct page *page;
+
+	if (cache->nobjs >= min)
+		return 0;
+	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+		page = alloc_page(gfp_flags);
+		if (!page)
+			return -ENOMEM;
+		set_page_private(page, 0);
+		cache->objects[cache->nobjs++] = page_address(page);
+	}
+	return 0;
+}
+
+static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs)
+		free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
 static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 {
 	int r;
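The two helpers added above follow KVM's preallocate-then-consume cache pattern: top up a small fixed-capacity stack of page-sized objects while sleeping is still allowed, then pop from it later in contexts that must not fail or sleep. A minimal user-space sketch of the same pattern, with aligned_alloc() standing in for alloc_page()/page_address(); all names below are illustrative, not kernel API:

	#include <stdlib.h>

	#define NR_MEM_OBJS 4                   /* stands in for KVM_NR_MEM_OBJS */
	#define PAGE_SZ 4096

	struct memory_cache {
		int nobjs;
		void *objects[NR_MEM_OBJS];
	};

	/* Refill the cache to capacity if it holds fewer than min objects. */
	static int cache_topup(struct memory_cache *mc, int min)
	{
		void *page;

		if (mc->nobjs >= min)
			return 0;
		while (mc->nobjs < NR_MEM_OBJS) {
			page = aligned_alloc(PAGE_SZ, PAGE_SZ);
			if (!page)
				return -1;
			mc->objects[mc->nobjs++] = page;
		}
		return 0;
	}

	/* Pop a banked object; callers topped up first, so this cannot fail. */
	static void *cache_alloc(struct memory_cache *mc)
	{
		return mc->objects[--mc->nobjs];
	}

	/* Release whatever is still banked, as mmu_free_memory_cache_page() does. */
	static void cache_free_all(struct memory_cache *mc)
	{
		while (mc->nobjs)
			free(mc->objects[--mc->nobjs]);
	}

	int main(void)
	{
		struct memory_cache mc = { 0 };

		if (cache_topup(&mc, 4))
			return 1;
		free(cache_alloc(&mc));         /* would become a shadow page table */
		cache_free_all(&mc);
		return 0;
	}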
@@ -237,8 +259,7 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 				   rmap_desc_cache, 1, gfp_flags);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
-				   mmu_page_cache, 4, gfp_flags);
+	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
@@ -266,7 +287,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
 	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
 	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-	mmu_free_memory_cache(&vcpu->mmu_page_cache);
+	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
 	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
 }
 
@@ -281,24 +302,15 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 	return p;
 }
 
-static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
-{
-	if (mc->nobjs < KVM_NR_MEM_OBJS)
-		mc->objects[mc->nobjs++] = obj;
-	else
-		kfree(obj);
-}
-
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
 	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
 				      sizeof(struct kvm_pte_chain));
 }
 
-static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
-			       struct kvm_pte_chain *pc)
+static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 {
-	mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
+	kfree(pc);
 }
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
@@ -307,10 +319,9 @@ static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 				      sizeof(struct kvm_rmap_desc));
 }
 
-static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
-			       struct kvm_rmap_desc *rd)
+static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 {
-	mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
+	kfree(rd);
 }
 
 /*
@@ -355,8 +366,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
 	}
 }
 
-static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
-				   struct page *page,
+static void rmap_desc_remove_entry(struct page *page,
 				   struct kvm_rmap_desc *desc,
 				   int i,
 				   struct kvm_rmap_desc *prev_desc)
@@ -376,10 +386,10 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
 		prev_desc->more = desc->more;
 	else
 		set_page_private(page,(unsigned long)desc->more | 1);
-	mmu_free_rmap_desc(vcpu, desc);
+	mmu_free_rmap_desc(desc);
 }
 
-static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
+static void rmap_remove(u64 *spte)
 {
 	struct page *page;
 	struct kvm_rmap_desc *desc;
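The `| 1` in the set_page_private() call above is the rmap's pointer-tagging scheme: page_private() holds either a single spte pointer (bit 0 clear) or a pointer to a kvm_rmap_desc chain (bit 0 set), and the low bit is free to carry that tag because aligned pointers always have it clear. A user-space sketch of the trick, with illustrative names:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Aligned pointers have bit 0 clear, so it can carry a type tag:
	 * clear = single entry, set = pointer to an overflow descriptor. */
	#define TAG_DESC 1UL

	static uintptr_t tag_desc(void *desc)
	{
		return (uintptr_t)desc | TAG_DESC;
	}

	static int is_desc(uintptr_t v)
	{
		return v & TAG_DESC;
	}

	static void *untag(uintptr_t v)
	{
		return (void *)(v & ~TAG_DESC);
	}

	int main(void)
	{
		long single;                  /* stands in for a single spte  */
		long chain[4];                /* stands in for kvm_rmap_desc  */
		uintptr_t slot;               /* stands in for page_private() */

		slot = (uintptr_t)&single;    /* one mapping: untagged pointer */
		assert(!is_desc(slot));

		slot = tag_desc(chain);       /* many mappings: tagged pointer */
		assert(is_desc(slot));
		assert(untag(slot) == (void *)chain);
		puts("tagged rmap slot round-trips");
		return 0;
	}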
@@ -407,7 +417,7 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
 		while (desc) {
 			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
 				if (desc->shadow_ptes[i] == spte) {
-					rmap_desc_remove_entry(vcpu, page,
+					rmap_desc_remove_entry(page,
 							       desc, i,
 							       prev_desc);
 					return;
@@ -442,7 +452,7 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		rmap_remove(vcpu, spte);
+		rmap_remove(spte);
 		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	}
@@ -464,14 +474,14 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_free_page(struct kvm *kvm,
 			      struct kvm_mmu_page *page_head)
 {
 	ASSERT(is_empty_shadow_page(page_head->spt));
 	list_del(&page_head->link);
-	mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
-	mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
-	++vcpu->kvm->n_free_mmu_pages;
+	__free_page(virt_to_page(page_head->spt));
+	kfree(page_head);
+	++kvm->n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
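This free path relies on an invariant set up by the topup code earlier in the diff: the cache stores page_address(page), and for directly mapped kernel pages virt_to_page() inverts page_address(), so the struct page can be recovered from the cached virtual address. A minimal sketch of the round trip, assuming a kernel-module context; the helper names are illustrative:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Allocate one page and hand back its kernel virtual address, the
	 * form in which mmu_topup_memory_cache_page() banks objects. */
	static void *shadow_table_alloc(void)
	{
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return NULL;
		set_page_private(page, 0);	/* rmap bookkeeping starts empty */
		return page_address(page);
	}

	/* Recover the struct page from the virtual address and free it,
	 * as kvm_mmu_free_page() now does for page_head->spt. */
	static void shadow_table_free(void *va)
	{
		__free_page(virt_to_page(va));
	}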
@@ -537,8 +547,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 	pte_chain->parent_ptes[0] = parent_pte;
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
-				       struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 				       u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
@@ -565,7 +574,7 @@ static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
 			pte_chain->parent_ptes[i] = NULL;
 			if (i == 0) {
 				hlist_del(&pte_chain->link);
-				mmu_free_pte_chain(vcpu, pte_chain);
+				mmu_free_pte_chain(pte_chain);
 				if (hlist_empty(&page->parent_ptes)) {
 					page->multimapped = 0;
 					page->parent_pte = NULL;
@@ -643,7 +652,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return page;
 }
 
-static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 					 struct kvm_mmu_page *page)
 {
 	unsigned i;
@@ -655,10 +664,10 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 			if (pt[i] & PT_PRESENT_MASK)
-				rmap_remove(vcpu, &pt[i]);
+				rmap_remove(&pt[i]);
 			pt[i] = 0;
 		}
-		kvm_flush_remote_tlbs(vcpu->kvm);
+		kvm_flush_remote_tlbs(kvm);
 		return;
 	}
 
@@ -669,19 +678,18 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 		if (!(ent & PT_PRESENT_MASK))
 			continue;
 		ent &= PT64_BASE_ADDR_MASK;
-		mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
+		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
 	}
-	kvm_flush_remote_tlbs(vcpu->kvm);
+	kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
-			     struct kvm_mmu_page *page,
+static void kvm_mmu_put_page(struct kvm_mmu_page *page,
 			     u64 *parent_pte)
 {
-	mmu_page_remove_parent_pte(vcpu, page, parent_pte);
+	mmu_page_remove_parent_pte(page, parent_pte);
 }
 
-static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_zap_page(struct kvm *kvm,
 			     struct kvm_mmu_page *page)
 {
 	u64 *parent_pte;
@@ -697,15 +705,15 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
 			parent_pte = chain->parent_ptes[0];
 		}
 		BUG_ON(!parent_pte);
-		kvm_mmu_put_page(vcpu, page, parent_pte);
+		kvm_mmu_put_page(page, parent_pte);
 		set_shadow_pte(parent_pte, 0);
 	}
-	kvm_mmu_page_unlink_children(vcpu, page);
+	kvm_mmu_page_unlink_children(kvm, page);
 	if (!page->root_count) {
 		hlist_del(&page->hash_link);
-		kvm_mmu_free_page(vcpu, page);
+		kvm_mmu_free_page(kvm, page);
 	} else
-		list_move(&page->link, &vcpu->kvm->active_mmu_pages);
+		list_move(&page->link, &kvm->active_mmu_pages);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -724,7 +732,7 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
 				 page->role.word);
-			kvm_mmu_zap_page(vcpu, page);
+			kvm_mmu_zap_page(vcpu->kvm, page);
 			r = 1;
 		}
 	return r;
@@ -737,7 +745,7 @@ static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
 	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
 		pgprintk("%s: zap %lx %x\n",
 			 __FUNCTION__, gfn, page->role.word);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 }
 
@@ -1089,10 +1097,10 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 	pte = *spte;
 	if (is_present_pte(pte)) {
 		if (page->role.level == PT_PAGE_TABLE_LEVEL)
-			rmap_remove(vcpu, spte);
+			rmap_remove(spte);
 		else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
-			mmu_page_remove_parent_pte(vcpu, child, spte);
+			mmu_page_remove_parent_pte(child, spte);
 		}
 	}
 	*spte = 0;
@@ -1161,7 +1169,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, page->role.word);
-			kvm_mmu_zap_page(vcpu, page);
+			kvm_mmu_zap_page(vcpu->kvm, page);
 			continue;
 		}
 		page_offset = offset;
@@ -1207,7 +1215,7 @@ void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 		page = container_of(vcpu->kvm->active_mmu_pages.prev,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
@@ -1219,7 +1227,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
 		page = container_of(vcpu->kvm->active_mmu_pages.next,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 	free_page((unsigned long)vcpu->mmu.pae_root);
 }
@@ -1277,9 +1285,8 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 	mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
-	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_page *page;
 
 	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
@@ -1293,27 +1300,20 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (pt[i] & PT_WRITABLE_MASK) {
-				rmap_remove(vcpu, &pt[i]);
+				rmap_remove(&pt[i]);
 				pt[i] &= ~PT_WRITABLE_MASK;
 			}
 	}
 }
 
-void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
+void kvm_mmu_zap_all(struct kvm *kvm)
 {
-	destroy_kvm_mmu(vcpu);
-
-	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		struct kvm_mmu_page *page;
+	struct kvm_mmu_page *page, *node;
 
-		page = container_of(vcpu->kvm->active_mmu_pages.next,
-				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
-	}
+	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+		kvm_mmu_zap_page(kvm, page);
 
-	mmu_free_memory_caches(vcpu);
-	kvm_flush_remote_tlbs(vcpu->kvm);
-	init_kvm_mmu(vcpu);
+	kvm_flush_remote_tlbs(kvm);
 }
 
 void kvm_mmu_module_exit(void)
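kvm_mmu_zap_all() switches to list_for_each_entry_safe() because kvm_mmu_zap_page() unlinks (or list_move()s) the page it is handed, so the iterator must capture the next node before the current one is touched. A runnable user-space sketch of the same idiom, with plain pointers standing in for the kernel's list helpers; names are illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int id;
		struct node *next;
	};

	int main(void)
	{
		struct node *head = NULL, *n, *next;
		int i;

		/* Build a small list standing in for kvm->active_mmu_pages. */
		for (i = 0; i < 4; i++) {
			n = malloc(sizeof(*n));
			n->id = i;
			n->next = head;
			head = n;
		}

		/* "_safe" traversal: save ->next before freeing the current
		 * node, the same guarantee list_for_each_entry_safe() gives. */
		for (n = head; n; n = next) {
			next = n->next;
			printf("zapping page %d\n", n->id);
			free(n);
		}
		return 0;
	}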
@@ -1322,8 +1322,6 @@ void kvm_mmu_module_exit(void)
 		kmem_cache_destroy(pte_chain_cache);
 	if (rmap_desc_cache)
 		kmem_cache_destroy(rmap_desc_cache);
-	if (mmu_page_cache)
-		kmem_cache_destroy(mmu_page_cache);
 	if (mmu_page_header_cache)
 		kmem_cache_destroy(mmu_page_header_cache);
 }
@@ -1341,12 +1339,6 @@ int kvm_mmu_module_init(void)
 	if (!rmap_desc_cache)
 		goto nomem;
 
-	mmu_page_cache = kmem_cache_create("kvm_mmu_page",
-					   PAGE_SIZE,
-					   PAGE_SIZE, 0, NULL);
-	if (!mmu_page_cache)
-		goto nomem;
-
 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
 						  sizeof(struct kvm_mmu_page),
 						  0, 0, NULL);
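The surviving caches keep the usual init/exit shape: each kmem_cache_create() failure jumps to the nomem label, and the cleanup path (mirrored by the NULL checks in kvm_mmu_module_exit() above) destroys only what was actually created. A user-space sketch of that unwind pattern, with malloc()/free() standing in for kmem_cache_create()/kmem_cache_destroy(); names and sizes are illustrative:

	#include <stdlib.h>

	static void *pte_chain_cache;
	static void *rmap_desc_cache;
	static void *mmu_page_header_cache;

	/* Destroy only what exists; free(NULL) is a no-op, playing the part
	 * of the NULL checks before kmem_cache_destroy(). */
	static void module_exit_caches(void)
	{
		free(pte_chain_cache);
		free(rmap_desc_cache);
		free(mmu_page_header_cache);
	}

	/* Create each cache in turn; any failure unwinds everything created
	 * so far through the single nomem label. */
	static int module_init_caches(void)
	{
		pte_chain_cache = malloc(64);
		if (!pte_chain_cache)
			goto nomem;
		rmap_desc_cache = malloc(64);
		if (!rmap_desc_cache)
			goto nomem;
		mmu_page_header_cache = malloc(64);
		if (!mmu_page_header_cache)
			goto nomem;
		return 0;

	nomem:
		module_exit_caches();
		return -1;
	}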