author		Marcelo Tosatti <mtosatti@redhat.com>	2008-09-16 19:54:47 -0400
committer	Avi Kivity <avi@redhat.com>		2008-10-15 08:25:06 -0400
commit		4c2155ce81c193788082d4b8cdbc26d79edebc58
tree		5c028fe4a18c55b5c0e9c4e80be459dad3f96da7 /arch/x86/kvm/paging_tmpl.h
parent		777b3f49d297e387866604093b635e5bc9b9d2a6
KVM: switch to get_user_pages_fast
Convert gfn_to_pfn to use get_user_pages_fast, which can do lockless
pagetable lookups on x86. Kernel compilation on a 4-way guest is 3.7%
faster on VMX.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
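For context, a minimal sketch of the shape of the converted helper (the
conversion itself lives in virt/kvm/kvm_main.c, not in this file's diff;
bad_pfn stands in for the real error handling, which is more involved):

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	addr = gfn_to_hva(kvm, gfn);	/* translate gfn to a userspace address */
	if (kvm_is_error_hva(addr))
		return bad_pfn;		/* placeholder for the real error path */

	/* lockless fast path: no mmap_sem required of the caller */
	npages = get_user_pages_fast(addr, 1, 1, page);
	if (npages != 1) {
		/* slow path: fall back to get_user_pages under mmap_sem */
		down_read(&current->mm->mmap_sem);
		npages = get_user_pages(current, current->mm, addr,
					1, 1, 0, page, NULL);
		up_read(&current->mm->mmap_sem);
	}
	return page_to_pfn(page[0]);
}

Because any locking now happens inside the pfn lookup itself, callers such
as FNAME(cmpxchg_gpte) and FNAME(page_fault) below can drop their
down_read/up_read pairs around gfn_to_page/gfn_to_pfn.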
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
 arch/x86/kvm/paging_tmpl.h | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index b671f61be41e..6dd08e096e24 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -102,14 +102,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
 	pt_element_t *table;
 	struct page *page;
 
-	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(kvm, table_gfn);
-	up_read(&current->mm->mmap_sem);
 
 	table = kmap_atomic(page, KM_USER0);
-
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
-
 	kunmap_atomic(table, KM_USER0);
 
 	kvm_release_page_dirty(page);
@@ -418,7 +414,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		return 0;
 	}
 
-	down_read(&current->mm->mmap_sem);
 	if (walker.level == PT_DIRECTORY_LEVEL) {
 		gfn_t large_gfn;
 		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
@@ -428,9 +423,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		}
 	}
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	/* implicit mb(), we'll read before PT lock is unlocked */
+	smp_rmb();
 	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
-	up_read(&current->mm->mmap_sem);
 
 	/* mmio */
 	if (is_error_pfn(pfn)) {
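The last hunk replaces the old "implicit mb()" comment with an explicit
smp_rmb(): with mmap_sem no longer taken around the pfn lookup, nothing
else guarantees that mmu_notifier_seq is read before the page tables are
walked inside gfn_to_pfn, so the ordering has to be stated explicitly for
the later retry check under mmu_lock to catch a racing invalidation.
Roughly (mmu_notifier_retry() is the pre-existing check in page_fault,
shown only for context; out_unlock is an illustrative label):

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();	/* order the seq read before the pfn lookup */
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;	/* an invalidation ran in between: retry the fault */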