path: root/arch/x86/kvm/paging_tmpl.h
author		Marcelo Tosatti <marcelo@kvack.org>	2008-02-23 09:44:30 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 04:53:25 -0400
commit		05da45583de9b383dc81dd695fe248431d6c9f2b (patch)
tree		a76d699e60aca4f775d5f67254214654235e2e17	/arch/x86/kvm/paging_tmpl.h
parent		2e53d63acba75795aa226febd140f67c58c6a353 (diff)
KVM: MMU: large page support
Create large page mappings if the guest PTEs are marked as such and the
underlying memory is hugetlbfs backed. If the largepage contains
write-protected pages, a large pte is not used.

Gives a consistent 2% improvement for data copies on ram mounted
filesystem, without NPT/EPT.

Anthony measures a 4% improvement on 4-way kernbench, with NPT.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
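The decisive step in this patch is rounding the faulting gfn down to a large-page boundary before asking whether that whole region is hugepage backed; only then may the shadow walk stop at PT_DIRECTORY_LEVEL. Below is a minimal, self-contained userspace sketch of that alignment check, assuming 2 MB large pages over 4 KB base pages; PAGES_PER_HPAGE and region_is_hugepage_backed() are illustrative stand-ins for KVM_PAGES_PER_HPAGE and is_largepage_backed(), not the kernel interfaces themselves.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Illustrative stand-in for KVM_PAGES_PER_HPAGE: 2 MB / 4 KB = 512. */
#define PAGES_PER_HPAGE 512ULL

/*
 * Hypothetical stand-in for is_largepage_backed(); the real helper
 * consults the memslot and the host mapping backing the region.
 */
static bool region_is_hugepage_backed(gfn_t large_gfn)
{
	(void)large_gfn;
	return true;
}

int main(void)
{
	gfn_t gfn = 0x12345;	/* faulting guest frame number */
	int largepage = 0;

	/* Round down to the first gfn of the 2 MB region, mirroring
	 * walker.gfn & ~(KVM_PAGES_PER_HPAGE-1) in the patch. */
	gfn_t large_gfn = gfn & ~(PAGES_PER_HPAGE - 1);

	if (region_is_hugepage_backed(large_gfn)) {
		gfn = large_gfn;	/* one spte maps the whole region */
		largepage = 1;
	}

	printf("gfn=%#llx largepage=%d\n", (unsigned long long)gfn, largepage);
	return 0;
}

With gfn = 0x12345 this prints gfn=0x12200 largepage=1: the fault is served by a single large spte covering the aligned 512-page region rather than a 4 KB mapping.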
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	32
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4b55f462e2b..17f9d160ca3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -248,6 +248,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	pt_element_t gpte;
 	unsigned pte_access;
 	struct page *npage;
+	int largepage = vcpu->arch.update_pte.largepage;
 
 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
@@ -264,7 +265,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 		return;
 	get_page(npage);
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
-		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
+		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
+		     npage);
 }
 
 /*
@@ -272,8 +274,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *walker,
-			 int user_fault, int write_fault, int *ptwrite,
-			 struct page *page)
+			 int user_fault, int write_fault, int largepage,
+			 int *ptwrite, struct page *page)
 {
 	hpa_t shadow_addr;
 	int level;
@@ -301,11 +303,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		if (level == PT_PAGE_TABLE_LEVEL)
 			break;
-		if (is_shadow_present_pte(*shadow_ent)) {
+
+		if (largepage && level == PT_DIRECTORY_LEVEL)
+			break;
+
+		if (is_shadow_present_pte(*shadow_ent)
+		    && !is_large_pte(*shadow_ent)) {
 			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
 			continue;
 		}
 
+		if (is_large_pte(*shadow_ent))
+			rmap_remove(vcpu->kvm, shadow_ent);
+
 		if (level - 1 == PT_PAGE_TABLE_LEVEL
 		    && walker->level == PT_DIRECTORY_LEVEL) {
 			metaphysical = 1;
@@ -339,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
 		     user_fault, write_fault,
 		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-		     ptwrite, walker->gfn, page);
+		     ptwrite, largepage, walker->gfn, page);
 
 	return shadow_ent;
 }
@@ -369,6 +379,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	int write_pt = 0;
 	int r;
 	struct page *page;
+	int largepage = 0;
 
 	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
 	kvm_mmu_audit(vcpu, "pre page fault");
@@ -396,6 +407,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	}
 
 	down_read(&current->mm->mmap_sem);
+	if (walker.level == PT_DIRECTORY_LEVEL) {
+		gfn_t large_gfn;
+		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
+		if (is_largepage_backed(vcpu, large_gfn)) {
+			walker.gfn = large_gfn;
+			largepage = 1;
+		}
+	}
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
 	up_read(&current->mm->mmap_sem);
 
@@ -410,7 +429,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-				  &write_pt, page);
+				  largepage, &write_pt, page);
+
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
 		 shadow_pte, *shadow_pte, write_pt);
 