 drivers/kvm/kvm.h      |   4 +-
 drivers/kvm/kvm_main.c |  68 +++++-----------------
 drivers/kvm/mmu.c      | 103 ++++++++++-----------------
 3 files changed, 52 insertions(+), 123 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 65ab268d4256..6636ae2ee3b5 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -535,8 +535,8 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
-void kvm_mmu_zap_all(struct kvm_vcpu *vcpu);
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_zap_all(struct kvm *kvm);
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
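Note: this header change is the heart of the patch. kvm_mmu_slot_remove_write_access() and kvm_mmu_zap_all() operate on the VM's shadow page tables as a whole, not on one vcpu's state, so they now take the struct kvm directly and can be called from any thread that holds kvm->lock. A minimal sketch of the call pattern the rest of the patch adopts; the wrapper name is made up for illustration, the two callees are the real functions:

    /* Sketch only: write-protect one memory slot under kvm->lock.
     * example_wrprotect_slot() is a hypothetical caller. */
    static void example_wrprotect_slot(struct kvm *kvm, int slot)
    {
            spin_lock(&kvm->lock);
            kvm_mmu_slot_remove_write_access(kvm, slot);
            kvm_flush_remote_tlbs(kvm);     /* other vcpus may cache stale translations */
            spin_unlock(&kvm->lock);
    }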
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1b206f197c6b..05f0418f2195 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -238,23 +238,6 @@ static void vcpu_load(struct kvm_vcpu *vcpu)
 	kvm_arch_ops->vcpu_load(vcpu);
 }
 
-/*
- * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
- * if the slot is not populated.
- */
-static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
-{
-	struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
-
-	mutex_lock(&vcpu->mutex);
-	if (!vcpu->vmcs) {
-		mutex_unlock(&vcpu->mutex);
-		return NULL;
-	}
-	kvm_arch_ops->vcpu_load(vcpu);
-	return vcpu;
-}
-
 static void vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_arch_ops->vcpu_put(vcpu);
@@ -663,13 +646,6 @@ void fx_init(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(fx_init);
 
-static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
-{
-	spin_lock(&vcpu->kvm->lock);
-	kvm_mmu_slot_remove_write_access(vcpu, slot);
-	spin_unlock(&vcpu->kvm->lock);
-}
-
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -792,19 +768,10 @@ raced:
 	*memslot = new;
 	++kvm->memory_config_version;
 
-	spin_unlock(&kvm->lock);
-
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		struct kvm_vcpu *vcpu;
+	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	kvm_flush_remote_tlbs(kvm);
 
-		vcpu = vcpu_load_slot(kvm, i);
-		if (!vcpu)
-			continue;
-		if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
-			do_remove_write_access(vcpu, mem->slot);
-		kvm_mmu_reset_context(vcpu);
-		vcpu_put(vcpu);
-	}
+	spin_unlock(&kvm->lock);
 
 	kvm_free_physmem_slot(&old, &new);
 	return 0;
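Note: kvm_vm_ioctl_set_memory_region() previously had to drop kvm->lock and load each populated vcpu in turn just to reach the MMU helpers. It now write-protects the slot and flushes remote TLBs directly, before releasing the lock. Two behavioral details worth flagging: the write-protection is now unconditional (the old code did it only for KVM_MEM_LOG_DIRTY_PAGES slots), and the per-vcpu kvm_mmu_reset_context() calls are gone, presumably because revoking write access plus the remote TLB flush is sufficient once the MMU helpers operate on the updated slot directly.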
@@ -826,7 +793,6 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	int n;
-	int cleared;
 	unsigned long any = 0;
 
 	spin_lock(&kvm->lock);
@@ -855,23 +821,11 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
 		goto out;
 
-	if (any) {
-		cleared = 0;
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			struct kvm_vcpu *vcpu;
-
-			vcpu = vcpu_load_slot(kvm, i);
-			if (!vcpu)
-				continue;
-			if (!cleared) {
-				do_remove_write_access(vcpu, log->slot);
-				memset(memslot->dirty_bitmap, 0, n);
-				cleared = 1;
-			}
-			kvm_arch_ops->tlb_flush(vcpu);
-			vcpu_put(vcpu);
-		}
-	}
+	spin_lock(&kvm->lock);
+	kvm_mmu_slot_remove_write_access(kvm, log->slot);
+	kvm_flush_remote_tlbs(kvm);
+	memset(memslot->dirty_bitmap, 0, n);
+	spin_unlock(&kvm->lock);
 
 	r = 0;
 
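Note: the dirty-log path drops the `cleared` flag and the per-vcpu tlb_flush loop; it re-takes kvm->lock and performs the write-protect, remote flush, and bitmap clear in one short critical section (and now does so even when `any` is zero). For context, a hedged user-space sketch of the ioctl that exercises this path; the vm_fd and bitmap allocation are assumed to exist:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdio.h>

    /* Sketch only: fetch-and-clear the dirty bitmap for slot 0.
     * vm_fd is an open VM fd; bitmap holds one bit per page. */
    static int fetch_dirty_log(int vm_fd, void *bitmap)
    {
            struct kvm_dirty_log log = {
                    .slot = 0,
                    .dirty_bitmap = bitmap,
            };

            if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
                    perror("KVM_GET_DIRTY_LOG");
                    return -1;
            }
            return 0;
    }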
@@ -920,13 +874,9 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 			break;
 	kvm->naliases = n;
 
-	spin_unlock(&kvm->lock);
+	kvm_mmu_zap_all(kvm);
 
-	vcpu_load(&kvm->vcpus[0]);
-	spin_lock(&kvm->lock);
-	kvm_mmu_zap_all(&kvm->vcpus[0]);
 	spin_unlock(&kvm->lock);
-	vcpu_put(&kvm->vcpus[0]);
 
 	return 0;
 
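Note: the same simplification applies in kvm_vm_ioctl_set_memory_alias(): rather than unlocking, loading vcpu 0, and re-locking just to have a vcpu to pass in, the alias update zaps the shadow MMU directly under the lock it already holds. The mmu.c hunk at the end of this patch is what makes that legal: kvm_mmu_zap_all() no longer tears down and re-initializes the calling vcpu's MMU context.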
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1199d3f32ac3..48d28f1ff4a1 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -281,24 +281,15 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 	return p;
 }
 
-static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
-{
-	if (mc->nobjs < KVM_NR_MEM_OBJS)
-		mc->objects[mc->nobjs++] = obj;
-	else
-		kfree(obj);
-}
-
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
 	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
 				      sizeof(struct kvm_pte_chain));
 }
 
-static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
-			       struct kvm_pte_chain *pc)
+static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 {
-	mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
+	kfree(pc);
 }
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
@@ -307,10 +298,9 @@ static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 				      sizeof(struct kvm_rmap_desc));
 }
 
-static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
-			       struct kvm_rmap_desc *rd)
+static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 {
-	mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
+	kfree(rd);
 }
 
 /*
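Note: the free paths above switch from mmu_memory_cache_free() to plain kfree(). The caches are per-vcpu and are refilled with the owning vcpu loaded; now that rmap and pte-chain teardown can run from another vcpu's context (or from a VM ioctl with no vcpu at all), recycling objects into some vcpu's cache would race with that vcpu's allocator. Allocation still draws on the per-vcpu caches, so the pairing becomes deliberately asymmetric. A sketch of the hazard being avoided, with a hypothetical function name:

    /* Sketch only: what a cross-vcpu cache free would look like.
     * 'owner' may concurrently run mmu_memory_cache_alloc(), and
     * nothing serializes mc->nobjs against that, hence kfree(). */
    static void unsafe_cross_vcpu_free(struct kvm_vcpu *owner, void *obj)
    {
            struct kvm_mmu_memory_cache *mc = &owner->mmu_pte_chain_cache;

            if (mc->nobjs < KVM_NR_MEM_OBJS)
                    mc->objects[mc->nobjs++] = obj; /* racy from here on */
            else
                    kfree(obj);
    }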
@@ -355,8 +345,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
 	}
 }
 
-static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
-				   struct page *page,
+static void rmap_desc_remove_entry(struct page *page,
 				   struct kvm_rmap_desc *desc,
 				   int i,
 				   struct kvm_rmap_desc *prev_desc)
@@ -376,10 +365,10 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
 		prev_desc->more = desc->more;
 	else
 		set_page_private(page,(unsigned long)desc->more | 1);
-	mmu_free_rmap_desc(vcpu, desc);
+	mmu_free_rmap_desc(desc);
 }
 
-static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
+static void rmap_remove(u64 *spte)
 {
 	struct page *page;
 	struct kvm_rmap_desc *desc;
@@ -407,7 +396,7 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
 		while (desc) {
 			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
 				if (desc->shadow_ptes[i] == spte) {
-					rmap_desc_remove_entry(vcpu, page,
+					rmap_desc_remove_entry(page,
 							       desc, i,
 							       prev_desc);
 					return;
@@ -442,7 +431,7 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		rmap_remove(vcpu, spte);
+		rmap_remove(spte);
 		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	}
@@ -464,14 +453,14 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_free_page(struct kvm *kvm,
 			      struct kvm_mmu_page *page_head)
 {
 	ASSERT(is_empty_shadow_page(page_head->spt));
 	list_del(&page_head->link);
-	mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
-	mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
-	++vcpu->kvm->n_free_mmu_pages;
+	kfree(page_head->spt);
+	kfree(page_head);
+	++kvm->n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -537,8 +526,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 	pte_chain->parent_ptes[0] = parent_pte;
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
-				       struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 				       u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
@@ -565,7 +553,7 @@ static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
 			pte_chain->parent_ptes[i] = NULL;
 			if (i == 0) {
 				hlist_del(&pte_chain->link);
-				mmu_free_pte_chain(vcpu, pte_chain);
+				mmu_free_pte_chain(pte_chain);
 				if (hlist_empty(&page->parent_ptes)) {
 					page->multimapped = 0;
 					page->parent_pte = NULL;
@@ -643,7 +631,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return page;
 }
 
-static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 					 struct kvm_mmu_page *page)
 {
 	unsigned i;
@@ -655,10 +643,10 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 			if (pt[i] & PT_PRESENT_MASK)
-				rmap_remove(vcpu, &pt[i]);
+				rmap_remove(&pt[i]);
 			pt[i] = 0;
 		}
-		kvm_flush_remote_tlbs(vcpu->kvm);
+		kvm_flush_remote_tlbs(kvm);
 		return;
 	}
 
@@ -669,19 +657,18 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 		if (!(ent & PT_PRESENT_MASK))
 			continue;
 		ent &= PT64_BASE_ADDR_MASK;
-		mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
+		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
 	}
-	kvm_flush_remote_tlbs(vcpu->kvm);
+	kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
-			     struct kvm_mmu_page *page,
+static void kvm_mmu_put_page(struct kvm_mmu_page *page,
 			     u64 *parent_pte)
 {
-	mmu_page_remove_parent_pte(vcpu, page, parent_pte);
+	mmu_page_remove_parent_pte(page, parent_pte);
 }
 
-static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_zap_page(struct kvm *kvm,
 			     struct kvm_mmu_page *page)
 {
 	u64 *parent_pte;
@@ -697,15 +684,15 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
 			parent_pte = chain->parent_ptes[0];
 		}
 		BUG_ON(!parent_pte);
-		kvm_mmu_put_page(vcpu, page, parent_pte);
+		kvm_mmu_put_page(page, parent_pte);
 		set_shadow_pte(parent_pte, 0);
 	}
-	kvm_mmu_page_unlink_children(vcpu, page);
+	kvm_mmu_page_unlink_children(kvm, page);
 	if (!page->root_count) {
 		hlist_del(&page->hash_link);
-		kvm_mmu_free_page(vcpu, page);
+		kvm_mmu_free_page(kvm, page);
 	} else
-		list_move(&page->link, &vcpu->kvm->active_mmu_pages);
+		list_move(&page->link, &kvm->active_mmu_pages);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -724,7 +711,7 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
 				 page->role.word);
-			kvm_mmu_zap_page(vcpu, page);
+			kvm_mmu_zap_page(vcpu->kvm, page);
 			r = 1;
 		}
 	return r;
@@ -737,7 +724,7 @@ static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
 	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
 		pgprintk("%s: zap %lx %x\n",
 			 __FUNCTION__, gfn, page->role.word);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 }
 
@@ -1089,10 +1076,10 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 	pte = *spte;
 	if (is_present_pte(pte)) {
 		if (page->role.level == PT_PAGE_TABLE_LEVEL)
-			rmap_remove(vcpu, spte);
+			rmap_remove(spte);
 		else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
-			mmu_page_remove_parent_pte(vcpu, child, spte);
+			mmu_page_remove_parent_pte(child, spte);
 		}
 	}
 	*spte = 0;
@@ -1161,7 +1148,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, page->role.word);
-			kvm_mmu_zap_page(vcpu, page);
+			kvm_mmu_zap_page(vcpu->kvm, page);
 			continue;
 		}
 		page_offset = offset;
@@ -1207,7 +1194,7 @@ void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 		page = container_of(vcpu->kvm->active_mmu_pages.prev,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
@@ -1219,7 +1206,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
 		page = container_of(vcpu->kvm->active_mmu_pages.next,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
+		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 	free_page((unsigned long)vcpu->mmu.pae_root);
 }
@@ -1277,9 +1264,8 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 	mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
-	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_page *page;
 
 	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
@@ -1293,27 +1279,20 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (pt[i] & PT_WRITABLE_MASK) {
-				rmap_remove(vcpu, &pt[i]);
+				rmap_remove(&pt[i]);
 				pt[i] &= ~PT_WRITABLE_MASK;
 			}
 	}
 }
 
-void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
+void kvm_mmu_zap_all(struct kvm *kvm)
 {
-	destroy_kvm_mmu(vcpu);
+	struct kvm_mmu_page *page, *node;
 
-	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		struct kvm_mmu_page *page;
-
-		page = container_of(vcpu->kvm->active_mmu_pages.next,
-				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu, page);
-	}
+	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+		kvm_mmu_zap_page(kvm, page);
 
-	mmu_free_memory_caches(vcpu);
-	kvm_flush_remote_tlbs(vcpu->kvm);
-	init_kvm_mmu(vcpu);
+	kvm_flush_remote_tlbs(kvm);
 }
 
 void kvm_mmu_module_exit(void)
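Note: kvm_mmu_zap_all() is reduced to a list walk plus a single remote TLB flush; the vcpu-centric teardown (destroy_kvm_mmu(), the memory-cache frees, init_kvm_mmu()) is gone, since the caller may have no vcpu context to rebuild. The _safe iterator is what makes the walk correct: kvm_mmu_zap_page() either frees the current entry or list_move()s it, so a plain list_for_each_entry would chase a dangling next pointer. A generic sketch of the idiom; struct item and drain() are made up for illustration:

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Sketch only: the _safe variant caches the next node ('tmp')
     * before the body runs, so the body may free or relink 'cur'. */
    struct item {
            struct list_head link;
    };

    static void drain(struct list_head *head)
    {
            struct item *cur, *tmp;

            list_for_each_entry_safe(cur, tmp, head, link) {
                    list_del(&cur->link);
                    kfree(cur);     /* safe: iterator already advanced */
            }
    }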
