diff options
author | Marcelo Tosatti <mtosatti@redhat.com> | 2008-09-16 19:54:47 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2008-10-15 08:25:06 -0400 |
commit | 4c2155ce81c193788082d4b8cdbc26d79edebc58 (patch) | |
tree | 5c028fe4a18c55b5c0e9c4e80be459dad3f96da7 /arch/x86/kvm/mmu.c | |
parent | 777b3f49d297e387866604093b635e5bc9b9d2a6 (diff) |
KVM: switch to get_user_pages_fast
Convert gfn_to_pfn to use get_user_pages_fast, which can do lockless
pagetable lookups on x86. Kernel compilation on 4-way guest is 3.7%
faster on VMX.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 23 |
1 file changed, 9 insertions, 14 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index bce3e25ec79b..5779a2323e23 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -405,16 +405,19 @@ static int host_largepage_backed(struct kvm *kvm, gfn_t gfn) | |||
405 | { | 405 | { |
406 | struct vm_area_struct *vma; | 406 | struct vm_area_struct *vma; |
407 | unsigned long addr; | 407 | unsigned long addr; |
408 | int ret = 0; | ||
408 | 409 | ||
409 | addr = gfn_to_hva(kvm, gfn); | 410 | addr = gfn_to_hva(kvm, gfn); |
410 | if (kvm_is_error_hva(addr)) | 411 | if (kvm_is_error_hva(addr)) |
411 | return 0; | 412 | return ret; |
412 | 413 | ||
414 | down_read(&current->mm->mmap_sem); | ||
413 | vma = find_vma(current->mm, addr); | 415 | vma = find_vma(current->mm, addr); |
414 | if (vma && is_vm_hugetlb_page(vma)) | 416 | if (vma && is_vm_hugetlb_page(vma)) |
415 | return 1; | 417 | ret = 1; |
418 | up_read(&current->mm->mmap_sem); | ||
416 | 419 | ||
417 | return 0; | 420 | return ret; |
418 | } | 421 | } |
419 | 422 | ||
420 | static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn) | 423 | static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn) |
@@ -1140,9 +1143,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva) | |||
1140 | if (gpa == UNMAPPED_GVA) | 1143 | if (gpa == UNMAPPED_GVA) |
1141 | return NULL; | 1144 | return NULL; |
1142 | 1145 | ||
1143 | down_read(&current->mm->mmap_sem); | ||
1144 | page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); | 1146 | page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); |
1145 | up_read(&current->mm->mmap_sem); | ||
1146 | 1147 | ||
1147 | return page; | 1148 | return page; |
1148 | } | 1149 | } |
@@ -1330,16 +1331,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) | |||
1330 | pfn_t pfn; | 1331 | pfn_t pfn; |
1331 | unsigned long mmu_seq; | 1332 | unsigned long mmu_seq; |
1332 | 1333 | ||
1333 | down_read(&current->mm->mmap_sem); | ||
1334 | if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { | 1334 | if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { |
1335 | gfn &= ~(KVM_PAGES_PER_HPAGE-1); | 1335 | gfn &= ~(KVM_PAGES_PER_HPAGE-1); |
1336 | largepage = 1; | 1336 | largepage = 1; |
1337 | } | 1337 | } |
1338 | 1338 | ||
1339 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | 1339 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
1340 | /* implicit mb(), we'll read before PT lock is unlocked */ | 1340 | smp_rmb(); |
1341 | pfn = gfn_to_pfn(vcpu->kvm, gfn); | 1341 | pfn = gfn_to_pfn(vcpu->kvm, gfn); |
1342 | up_read(&current->mm->mmap_sem); | ||
1343 | 1342 | ||
1344 | /* mmio */ | 1343 | /* mmio */ |
1345 | if (is_error_pfn(pfn)) { | 1344 | if (is_error_pfn(pfn)) { |
@@ -1488,15 +1487,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, | |||
1488 | if (r) | 1487 | if (r) |
1489 | return r; | 1488 | return r; |
1490 | 1489 | ||
1491 | down_read(&current->mm->mmap_sem); | ||
1492 | if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { | 1490 | if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { |
1493 | gfn &= ~(KVM_PAGES_PER_HPAGE-1); | 1491 | gfn &= ~(KVM_PAGES_PER_HPAGE-1); |
1494 | largepage = 1; | 1492 | largepage = 1; |
1495 | } | 1493 | } |
1496 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | 1494 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
1497 | /* implicit mb(), we'll read before PT lock is unlocked */ | 1495 | smp_rmb(); |
1498 | pfn = gfn_to_pfn(vcpu->kvm, gfn); | 1496 | pfn = gfn_to_pfn(vcpu->kvm, gfn); |
1499 | up_read(&current->mm->mmap_sem); | ||
1500 | if (is_error_pfn(pfn)) { | 1497 | if (is_error_pfn(pfn)) { |
1501 | kvm_release_pfn_clean(pfn); | 1498 | kvm_release_pfn_clean(pfn); |
1502 | return 1; | 1499 | return 1; |
@@ -1809,15 +1806,13 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
1809 | return; | 1806 | return; |
1810 | gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; | 1807 | gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; |
1811 | 1808 | ||
1812 | down_read(&current->mm->mmap_sem); | ||
1813 | if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) { | 1809 | if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) { |
1814 | gfn &= ~(KVM_PAGES_PER_HPAGE-1); | 1810 | gfn &= ~(KVM_PAGES_PER_HPAGE-1); |
1815 | vcpu->arch.update_pte.largepage = 1; | 1811 | vcpu->arch.update_pte.largepage = 1; |
1816 | } | 1812 | } |
1817 | vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq; | 1813 | vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq; |
1818 | /* implicit mb(), we'll read before PT lock is unlocked */ | 1814 | smp_rmb(); |
1819 | pfn = gfn_to_pfn(vcpu->kvm, gfn); | 1815 | pfn = gfn_to_pfn(vcpu->kvm, gfn); |
1820 | up_read(&current->mm->mmap_sem); | ||
1821 | 1816 | ||
1822 | if (is_error_pfn(pfn)) { | 1817 | if (is_error_pfn(pfn)) { |
1823 | kvm_release_pfn_clean(pfn); | 1818 | kvm_release_pfn_clean(pfn); |