 Documentation/kvm/mmu.txt  |  4 +++-
 arch/x86/kvm/mmu.c         | 38 +++++++++++++++++++++++++++++---------
 arch/x86/kvm/paging_tmpl.h |  3 +++
 3 files changed, 35 insertions(+), 10 deletions(-)
diff --git a/Documentation/kvm/mmu.txt b/Documentation/kvm/mmu.txt
index 0e872ae30914..2201dcba92a1 100644
--- a/Documentation/kvm/mmu.txt
+++ b/Documentation/kvm/mmu.txt
@@ -180,7 +180,9 @@ Shadow pages contain the following information:
    guest pages as leaves.
  gfns:
    An array of 512 guest frame numbers, one for each present pte. Used to
-   perform a reverse map from a pte to a gfn.
+   perform a reverse map from a pte to a gfn. When role.direct is set, any
+   element of this array can be calculated from the gfn field when used; in
+   this case, the array of gfns is not allocated. See role.direct and gfn.
  slot_bitmap:
    A bitmap containing one bit per memory slot. If the page contains a pte
    mapping a page from memory slot n, then bit n of slot_bitmap will be set
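The calculation the new documentation text refers to is the one the helper added below performs: for a direct shadow page, the pte at index i of a level-L page maps gfn + (i << ((L - 1) * PT64_LEVEL_BITS)). A minimal userspace sketch of that arithmetic, assuming PT64_LEVEL_BITS == 9 (512 entries per level); direct_gfn() is a hypothetical illustration, not code from the patch:

#include <stdio.h>

#define PT64_LEVEL_BITS 9	/* assumed: 512 entries per table level */

/* Hypothetical stand-in for the direct-page arm of
 * kvm_mmu_page_get_gfn(): entry 'index' of a direct page at
 * 'level' maps base_gfn plus index * 512^(level - 1) frames.
 */
static unsigned long long direct_gfn(unsigned long long base_gfn,
				     int level, int index)
{
	return base_gfn + ((unsigned long long)index <<
			   ((level - 1) * PT64_LEVEL_BITS));
}

int main(void)
{
	printf("%llx\n", direct_gfn(0x1000, 1, 5));	/* 0x1005: one frame per pte */
	printf("%llx\n", direct_gfn(0x1000, 2, 5));	/* 0x1a00: 512 frames per pte */
	return 0;
}

Because every element is recomputable this way, keeping a 512-entry gfns array for direct pages stores no information, which is why the patch stops allocating it.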
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 07673487fd5d..f46b6c9aff27 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -397,6 +397,22 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 	kmem_cache_free(rmap_desc_cache, rd);
 }
 
+static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
+{
+	if (!sp->role.direct)
+		return sp->gfns[index];
+
+	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
+}
+
+static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
+{
+	if (sp->role.direct)
+		BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
+	else
+		sp->gfns[index] = gfn;
+}
+
 /*
  * Return the pointer to the largepage write count for a given
  * gfn, handling slots that are not large page aligned.
@@ -547,7 +563,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 		return count;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
-	sp->gfns[spte - sp->spt] = gfn;
+	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 	if (!*rmapp) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
@@ -605,6 +621,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	struct kvm_rmap_desc *prev_desc;
 	struct kvm_mmu_page *sp;
 	pfn_t pfn;
+	gfn_t gfn;
 	unsigned long *rmapp;
 	int i;
 
@@ -616,7 +633,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writable_pte(*spte))
 		kvm_set_pfn_dirty(pfn);
-	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
+	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
@@ -900,7 +918,8 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	ASSERT(is_empty_shadow_page(sp->spt));
 	list_del(&sp->link);
 	__free_page(virt_to_page(sp->spt));
-	__free_page(virt_to_page(sp->gfns));
+	if (!sp->role.direct)
+		__free_page(virt_to_page(sp->gfns));
 	kmem_cache_free(mmu_page_header_cache, sp);
 	++kvm->arch.n_free_mmu_pages;
 }
@@ -911,13 +930,15 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 }
 
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
-					       u64 *parent_pte)
+					       u64 *parent_pte, int direct)
 {
 	struct kvm_mmu_page *sp;
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
-	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+	if (!direct)
+		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
+						  PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
@@ -1386,7 +1407,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		return sp;
 	}
 	++vcpu->kvm->stat.mmu_cache_miss;
-	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
+	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
 	if (!sp)
 		return sp;
 	sp->gfn = gfn;
@@ -3403,7 +3424,7 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 
 	if (*sptep & PT_WRITABLE_MASK) {
 		rev_sp = page_header(__pa(sptep));
-		gfn = rev_sp->gfns[sptep - rev_sp->spt];
+		gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
 
 		if (!gfn_to_memslot(kvm, gfn)) {
 			if (!printk_ratelimit())
@@ -3417,8 +3438,7 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 			return;
 		}
 
-		rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
-				    rev_sp->role.level);
+		rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
 		if (!*rmapp) {
 			if (!printk_ratelimit())
 				return;
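Note how the allocation and free paths above must agree on when sp->gfns exists. A toy sketch of that discipline in plain C, under the assumption that gfns is an ordinary heap allocation (in the kernel it comes from a per-vcpu page cache), with hypothetical toy_* names:

#include <stdlib.h>

/* Toy model (not patch code): sp->gfns is allocated only for
 * indirect pages, so the free path must test role.direct the same
 * way the alloc path does, or it frees an uninitialized pointer
 * for direct pages (and leaks a page if the test is dropped).
 */
struct toy_sp {
	int direct;			/* models sp->role.direct */
	unsigned long long *gfns;	/* models sp->gfns */
};

static int toy_alloc_page(struct toy_sp *sp, int direct)
{
	sp->direct = direct;
	sp->gfns = NULL;
	if (!direct) {
		sp->gfns = calloc(512, sizeof(*sp->gfns));
		if (!sp->gfns)
			return -1;
	}
	return 0;
}

static void toy_free_page(struct toy_sp *sp)
{
	if (!sp->direct)
		free(sp->gfns);
}

The payoff is one page of memory saved per direct shadow page, which matters with tdp where nearly all shadow pages are direct.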
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 167f53357eef..2ee7060a80a5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -582,6 +582,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 	offset = nr_present = 0;
 
+	/* A direct kvm_mmu_page cannot be unsync. */
+	BUG_ON(sp->role.direct);
+
 	if (PTTYPE == 32)
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;
 
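The new BUG_ON encodes an invariant rather than new behavior: only shadow pages that mirror a guest page table are ever marked unsync, and such pages always have role.direct clear. A toy sketch of that invariant; can_become_unsync() is hypothetical, not a function in this kernel:

#include <stdbool.h>

/* Toy model (not patch code): a page can only drift out of sync
 * with a guest page table if it shadows one, which a direct-mapped
 * page by definition does not. sync_page() can therefore assume
 * !sp->role.direct, which the BUG_ON above asserts.
 */
struct toy_role {
	bool direct;
};

static bool can_become_unsync(struct toy_role role)
{
	return !role.direct;
}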