about summary refs log tree commit diff stats
path: root/arch/x86/kvm/paging_tmpl.h
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>2012-10-16 08:08:43 -0400
committerAvi Kivity <avi@redhat.com>2012-10-17 10:39:16 -0400
commitd4878f24e32f5ea5330e6a48977c8997396bc014 (patch)
treed975e5d2f193d12787e35144ccb1df8bbb269c10 /arch/x86/kvm/paging_tmpl.h
parentbd660776da89b031632128fc8dbf6635f94cb659 (diff)
KVM: MMU: cleanup FNAME(page_fault)
Let it return emulate state instead of spte, like __direct_map. Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--arch/x86/kvm/paging_tmpl.h32
1 file changed, 13 insertions, 19 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 045d31ae8eb3..c5555329c735 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -427,21 +427,21 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
427 427
428/* 428/*
429 * Fetch a shadow pte for a specific level in the paging hierarchy. 429 * Fetch a shadow pte for a specific level in the paging hierarchy.
430 * If the guest tries to write a write-protected page, we need to
431 * emulate this operation, return 1 to indicate this case.
430 */ 432 */
431static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, 433static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
432 struct guest_walker *gw, 434 struct guest_walker *gw,
433 int user_fault, int write_fault, int hlevel, 435 int user_fault, int write_fault, int hlevel,
434 int *emulate, pfn_t pfn, bool map_writable, 436 pfn_t pfn, bool map_writable, bool prefault)
435 bool prefault)
436{ 437{
437 unsigned access = gw->pt_access;
438 struct kvm_mmu_page *sp = NULL; 438 struct kvm_mmu_page *sp = NULL;
439 int top_level;
440 unsigned direct_access;
441 struct kvm_shadow_walk_iterator it; 439 struct kvm_shadow_walk_iterator it;
440 unsigned direct_access, access = gw->pt_access;
441 int top_level, emulate = 0;
442 442
443 if (!is_present_gpte(gw->ptes[gw->level - 1])) 443 if (!is_present_gpte(gw->ptes[gw->level - 1]))
444 return NULL; 444 return 0;
445 445
446 direct_access = gw->pte_access; 446 direct_access = gw->pte_access;
447 447
@@ -505,17 +505,17 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
505 505
506 clear_sp_write_flooding_count(it.sptep); 506 clear_sp_write_flooding_count(it.sptep);
507 mmu_set_spte(vcpu, it.sptep, access, gw->pte_access, 507 mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
508 user_fault, write_fault, emulate, it.level, 508 user_fault, write_fault, &emulate, it.level,
509 gw->gfn, pfn, prefault, map_writable); 509 gw->gfn, pfn, prefault, map_writable);
510 FNAME(pte_prefetch)(vcpu, gw, it.sptep); 510 FNAME(pte_prefetch)(vcpu, gw, it.sptep);
511 511
512 return it.sptep; 512 return emulate;
513 513
514out_gpte_changed: 514out_gpte_changed:
515 if (sp) 515 if (sp)
516 kvm_mmu_put_page(sp, it.sptep); 516 kvm_mmu_put_page(sp, it.sptep);
517 kvm_release_pfn_clean(pfn); 517 kvm_release_pfn_clean(pfn);
518 return NULL; 518 return 0;
519} 519}
520 520
521/* 521/*
@@ -538,8 +538,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
538 int write_fault = error_code & PFERR_WRITE_MASK; 538 int write_fault = error_code & PFERR_WRITE_MASK;
539 int user_fault = error_code & PFERR_USER_MASK; 539 int user_fault = error_code & PFERR_USER_MASK;
540 struct guest_walker walker; 540 struct guest_walker walker;
541 u64 *sptep;
542 int emulate = 0;
543 int r; 541 int r;
544 pfn_t pfn; 542 pfn_t pfn;
545 int level = PT_PAGE_TABLE_LEVEL; 543 int level = PT_PAGE_TABLE_LEVEL;
@@ -601,17 +599,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
601 kvm_mmu_free_some_pages(vcpu); 599 kvm_mmu_free_some_pages(vcpu);
602 if (!force_pt_level) 600 if (!force_pt_level)
603 transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); 601 transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
604 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, 602 r = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
605 level, &emulate, pfn, map_writable, prefault); 603 level, pfn, map_writable, prefault);
606 (void)sptep;
607 pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
608 sptep, *sptep, emulate);
609
610 ++vcpu->stat.pf_fixed; 604 ++vcpu->stat.pf_fixed;
611 kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); 605 kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
612 spin_unlock(&vcpu->kvm->mmu_lock); 606 spin_unlock(&vcpu->kvm->mmu_lock);
613 607
614 return emulate; 608 return r;
615 609
616out_unlock: 610out_unlock:
617 spin_unlock(&vcpu->kvm->mmu_lock); 611 spin_unlock(&vcpu->kvm->mmu_lock);