Diffstat (limited to 'arch/x86/kvm/mmu.c')
 arch/x86/kvm/mmu.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 28f9a44060cc..6f8392d4034e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -559,7 +559,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
 {
-	return gfn;
+	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
 }
 
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
@@ -663,7 +663,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	struct hlist_node *node;
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
@@ -701,7 +701,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	}
 	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
 		 gfn, role.word);
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && sp->role.word == role.word) {
@@ -840,7 +840,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
@@ -1450,7 +1450,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		vcpu->arch.last_pt_write_count = 1;
 		vcpu->arch.last_pte_updated = NULL;
 	}
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
 		if (sp->gfn != gfn || sp->role.metaphysical)
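
The change moves the bucket masking into kvm_page_table_hashfn() itself, so each caller drops its "% KVM_NUM_MMU_PAGES" and uses the returned value directly as an index into mmu_page_hash[]. The two forms pick the same bucket whenever the table size is a power of two. Below is a minimal standalone sketch of that equivalence, not KVM code; the KVM_MMU_HASH_SHIFT value of 10 is an assumption for illustration, not taken from this diff.

#include <assert.h>
#include <stdio.h>

#define KVM_MMU_HASH_SHIFT 10	/* assumed: table of 2^10 buckets */
#define KVM_NUM_MMU_PAGES  (1 << KVM_MMU_HASH_SHIFT)

typedef unsigned long gfn_t;

/* Old scheme: identity hash, reduced with "%" at every call site. */
static unsigned old_index(gfn_t gfn)
{
	unsigned hash = gfn;	/* old kvm_page_table_hashfn() returned gfn */
	return hash % KVM_NUM_MMU_PAGES;
}

/* New scheme: the hash function itself returns an in-range bucket index. */
static unsigned new_index(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

int main(void)
{
	gfn_t gfn;

	/* For unsigned x and power-of-two n: x % n == x & (n - 1). */
	for (gfn = 0; gfn < 8 * KVM_NUM_MMU_PAGES; gfn++)
		assert(old_index(gfn) == new_index(gfn));

	printf("mask and modulo pick the same bucket\n");
	return 0;
}

Note that this rewrite is only safe while KVM_NUM_MMU_PAGES is defined as 1 << KVM_MMU_HASH_SHIFT; with a table size that is not a power of two, the mask and the modulo would select different buckets.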