author     Marcelo Tosatti <marcelo@kvack.org>       2008-02-23 09:44:30 -0500
committer  Avi Kivity <avi@qumranet.com>             2008-04-27 04:53:25 -0400
commit     05da45583de9b383dc81dd695fe248431d6c9f2b (patch)
tree       a76d699e60aca4f775d5f67254214654235e2e17 /virt/kvm/kvm_main.c
parent     2e53d63acba75795aa226febd140f67c58c6a353 (diff)
KVM: MMU: large page support
Create large page mappings if the guest PTEs are marked as such and the
underlying memory is hugetlbfs-backed. If the large page contains
write-protected pages, a large pte is not used.

Gives a consistent 2% improvement for data copies on a RAM-mounted
filesystem, without NPT/EPT.
Anthony measures a 4% improvement on 4-way kernbench, with NPT.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
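
The write-protection veto described above works through per-slot
bookkeeping: each memslot carries one lpage_info entry per
KVM_PAGES_PER_HPAGE-aligned huge-page frame, and a nonzero write_count in
an entry means at least one small page in that frame is write-protected,
so the fault path must fall back to 4K ptes for it. A minimal sketch of
that lookup (the helper name is illustrative, not the patch's exact one):

    /* Illustrative sketch: does any page in gfn's huge-page frame carry
     * write protection?  write_count is maintained by the arch MMU code
     * as pages are shadowed and unshadowed. */
    static int frame_has_wrprotected_page(struct kvm_memory_slot *slot,
                                          gfn_t gfn)
    {
        unsigned long idx = (gfn / KVM_PAGES_PER_HPAGE) -
                            (slot->base_gfn / KVM_PAGES_PER_HPAGE);

        return slot->lpage_info[idx].write_count > 0;
    }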
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c | 25 ++++++++++++++++++++++++-
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c41eb57ce29b..31db9b4d3016 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -212,9 +212,13 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 		vfree(free->dirty_bitmap);
 
+	if (!dont || free->lpage_info != dont->lpage_info)
+		vfree(free->lpage_info);
+
 	free->npages = 0;
 	free->dirty_bitmap = NULL;
 	free->rmap = NULL;
+	free->lpage_info = NULL;
 }
 
 void kvm_free_physmem(struct kvm *kvm)
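
This hunk mirrors the existing dirty_bitmap teardown for the new
lpage_info array. The array's element type is declared in the
include/linux/kvm_host.h part of the patch, which the diffstat above
excludes; it is along these lines:

    struct kvm_memory_slot {
        ...
        struct {
            unsigned long rmap_pde;  /* rmap chain head for the large pte */
            int write_count;         /* write-protected pages in the frame */
        } *lpage_info;
        ...
    };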
@@ -324,6 +328,25 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
 	}
+	if (npages && !new.lpage_info) {
+		int largepages = npages / KVM_PAGES_PER_HPAGE;
+		if (npages % KVM_PAGES_PER_HPAGE)
+			largepages++;
+		if (base_gfn % KVM_PAGES_PER_HPAGE)
+			largepages++;
+
+		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
+
+		if (!new.lpage_info)
+			goto out_free;
+
+		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
+
+		if (base_gfn % KVM_PAGES_PER_HPAGE)
+			new.lpage_info[0].write_count = 1;
+		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
+			new.lpage_info[largepages-1].write_count = 1;
+	}
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
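
The sizing arithmetic above computes an upper bound on how many huge-page
frames the slot can overlap: one entry per full frame, plus one if the
page count leaves a partial tail, plus one if base_gfn starts mid-frame.
The two trailing if statements then pre-mark the first and last entries
as write-protected whenever the slot starts or ends mid-frame, so a
partial frame can never be mapped with a large pte. A standalone model of
the count, taking KVM_PAGES_PER_HPAGE as 512 (4K base pages under 2M huge
pages on x86):

    #include <stdio.h>

    #define PAGES_PER_HPAGE 512UL   /* stand-in for KVM_PAGES_PER_HPAGE */

    /* Upper bound on the huge-page frames a slot of npages pages at
     * base_gfn can overlap -- the same arithmetic as the hunk above. */
    static unsigned long lpage_entries(unsigned long base_gfn,
                                       unsigned long npages)
    {
        unsigned long n = npages / PAGES_PER_HPAGE;

        if (npages % PAGES_PER_HPAGE)   /* partial tail frame */
            n++;
        if (base_gfn % PAGES_PER_HPAGE) /* unaligned start frame */
            n++;
        return n;
    }

    int main(void)
    {
        /* gfns 100..1099 overlap frames 0..2: 1000/512 = 1, plus the
         * partial tail, plus the unaligned start. */
        printf("%lu\n", lpage_entries(100, 1000));  /* prints 3 */
        /* a frame-aligned 2M slot needs exactly one entry */
        printf("%lu\n", lpage_entries(0, 512));     /* prints 1 */
        return 0;
    }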
@@ -467,7 +490,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
-static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
 
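
The last hunk drops the static qualifier so that code outside kvm_main.c,
in particular the arch MMU code linked into the same kvm module, can
translate a gfn to a host virtual address and ask the host MM whether the
backing VMA is a hugetlbfs mapping. A hedged reconstruction of that kind
of caller (the real helper lives in the arch part of the patch and may
differ in detail):

    /* Nonzero when gfn is backed by hugetlbfs in the host, i.e. a
     * large guest mapping is physically possible. */
    static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
    {
        struct vm_area_struct *vma;
        unsigned long addr;
        int ret = 0;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
            return 0;

        down_read(&current->mm->mmap_sem);  /* find_vma() wants mmap_sem */
        vma = find_vma(current->mm, addr);
        if (vma && is_vm_hugetlb_page(vma))
            ret = 1;
        up_read(&current->mm->mmap_sem);

        return ret;
    }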