 arch/x86/kvm/mmu.c         | 10 +++++-----
 include/asm-x86/kvm_host.h |  3 ++-
 2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 28f9a44060cc..6f8392d4034e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -559,7 +559,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
 {
-	return gfn;
+	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
 }
 
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
@@ -663,7 +663,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	struct hlist_node *node;
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
@@ -701,7 +701,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	}
 	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
 		 gfn, role.word);
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && sp->role.word == role.word) {
@@ -840,7 +840,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
@@ -1450,7 +1450,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		vcpu->arch.last_pt_write_count = 1;
 		vcpu->arch.last_pte_updated = NULL;
 	}
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
 		if (sp->gfn != gfn || sp->role.metaphysical)
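Aside (not part of the patch): the mmu.c hunks above all rely on the modulo at the call sites being redundant once the hash function masks with a power-of-two size. A minimal userspace sketch of that equivalence, using stand-in macro and function names rather than the kernel's own, is:

/*
 * Illustrative userspace sketch, not kernel code: for a power-of-two
 * table size, "gfn % size" and "gfn & (size - 1)" select the same
 * bucket, which is why the call sites can drop the explicit modulo.
 */
#include <assert.h>
#include <stdio.h>

#define MMU_HASH_SHIFT 10                      /* stand-in for KVM_MMU_HASH_SHIFT */
#define NUM_MMU_PAGES  (1 << MMU_HASH_SHIFT)   /* stand-in for KVM_NUM_MMU_PAGES  */

/* Old scheme: identity hash, each caller applies the modulo itself. */
static unsigned old_index(unsigned long gfn)
{
	return (unsigned)(gfn % NUM_MMU_PAGES);
}

/* New scheme: the hash function masks off the low bits directly. */
static unsigned new_index(unsigned long gfn)
{
	return (unsigned)(gfn & ((1 << MMU_HASH_SHIFT) - 1));
}

int main(void)
{
	unsigned long gfn;

	for (gfn = 0; gfn < (1UL << 20); gfn++)
		assert(old_index(gfn) == new_index(gfn));

	printf("mask and modulo agree for all tested gfns\n");
	return 0;
}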
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 68ee390b2844..e076790ee794 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -58,7 +58,8 @@
 
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
-#define KVM_NUM_MMU_PAGES 1024
+#define KVM_MMU_HASH_SHIFT 10
+#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
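Aside (not part of the patch): a quick standalone compile-time check, again with stand-in macro names, that the two-macro definition introduced in the hunk above keeps the original 1024-entry table and stays a power of two, so the mask-based indexing remains valid:

/* Standalone C11 sketch: the refactored macros preserve the table size. */
#define MMU_HASH_SHIFT 10
#define NUM_MMU_PAGES  (1 << MMU_HASH_SHIFT)

_Static_assert(NUM_MMU_PAGES == 1024,
	       "table size unchanged by the macro refactor");
_Static_assert((NUM_MMU_PAGES & (NUM_MMU_PAGES - 1)) == 0,
	       "size must remain a power of two for masking to be correct");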