author     Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>    2015-10-16 04:06:02 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>                     2015-10-16 04:34:00 -0400
commit     fd136902187838bcae3a572f41cb703553dd63b8 (patch)
tree       a3b6207dabee7d4bc8fe054e92cb5af8f8c59581
parent     5ed5c5c8fdbab889837c9223fc6f4bdaa830879c (diff)
KVM: x86: MMU: Move mapping_level_dirty_bitmap() call in mapping_level()
This is necessary to eliminate an extra memory slot search later.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
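The extra search referred to above comes from the page fault handlers doing two independent memslot lookups per fault: one inside mapping_level_dirty_bitmap() (via gfn_to_memslot_dirty_bitmap()) and a second inside mapping_level() (host_mapping_level() resolves the gfn to a host page size, which also walks the memslots). A rough before/after sketch of the caller pattern this patch consolidates; this shows the shape of the change, not verbatim kernel code:

	/* before: two memslot searches per fault */
	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);	/* memslot search #1 */
	if (likely(!force_pt_level))
		level = mapping_level(vcpu, gfn);		/* memslot search #2 */
	else
		level = PT_PAGE_TABLE_LEVEL;

	/*
	 * after: the dirty-bitmap check lives inside mapping_level(), so a
	 * follow-up patch can do one lookup and reuse the slot for both checks
	 */
	level = mapping_level(vcpu, gfn, &force_pt_level);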
-rw-r--r--    arch/x86/kvm/mmu.c          29
-rw-r--r--    arch/x86/kvm/paging_tmpl.h   6
2 files changed, 17 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2262728863de..890cd694c9a2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -870,10 +870,16 @@ static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
-static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
+			 bool *force_pt_level)
 {
 	int host_level, level, max_level;
 
+	if (likely(!*force_pt_level))
+		*force_pt_level = mapping_level_dirty_bitmap(vcpu, large_gfn);
+	if (unlikely(*force_pt_level))
+		return PT_PAGE_TABLE_LEVEL;
+
 	host_level = host_mapping_level(vcpu->kvm, large_gfn);
 
 	if (host_level == PT_PAGE_TABLE_LEVEL)
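Note that *force_pt_level is now an in/out parameter: a caller may seed it with its own reason to force 4KB pages, and mapping_level() only adds the dirty-bitmap condition when no such reason exists yet. A minimal sketch of the resulting contract, with a hypothetical caller:

	bool force_pt_level = false;	/* no caller-side reason to force 4KB yet */
	int level = mapping_level(vcpu, gfn, &force_pt_level);
	/*
	 * On return, force_pt_level may have become true (the gfn's slot is
	 * being dirty-logged); in that case level is PT_PAGE_TABLE_LEVEL.
	 */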
@@ -2962,14 +2968,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 {
 	int r;
 	int level;
-	bool force_pt_level;
+	bool force_pt_level = false;
 	pfn_t pfn;
 	unsigned long mmu_seq;
 	bool map_writable, write = error_code & PFERR_WRITE_MASK;
 
-	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+	level = mapping_level(vcpu, gfn, &force_pt_level);
 	if (likely(!force_pt_level)) {
-		level = mapping_level(vcpu, gfn);
 		/*
 		 * This path builds a PAE pagetable - so we can map
 		 * 2mb pages at maximum. Therefore check if the level
@@ -2979,8 +2984,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 			level = PT_DIRECTORY_LEVEL;
 
 		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	} else
-		level = PT_PAGE_TABLE_LEVEL;
+	}
 
 	if (fast_page_fault(vcpu, v, level, error_code))
 		return 0;
@@ -3495,20 +3499,15 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (r)
 		return r;
 
-	if (mapping_level_dirty_bitmap(vcpu, gfn) ||
-	    !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL))
-		force_pt_level = true;
-	else
-		force_pt_level = false;
-
+	force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
+							   PT_DIRECTORY_LEVEL);
+	level = mapping_level(vcpu, gfn, &force_pt_level);
 	if (likely(!force_pt_level)) {
-		level = mapping_level(vcpu, gfn);
 		if (level > PT_DIRECTORY_LEVEL &&
 		    !check_hugepage_cache_consistency(vcpu, gfn, level))
 			level = PT_DIRECTORY_LEVEL;
 		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	} else
-		level = PT_PAGE_TABLE_LEVEL;
+	}
 
 	if (fast_page_fault(vcpu, gpa, level, error_code))
 		return 0;
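This hunk is where the in/out seeding pays off: tdp_page_fault() records the hugepage cache-consistency result in force_pt_level before the call, so the likely(!*force_pt_level) guard inside mapping_level() skips the dirty-bitmap lookup (and its memslot search) entirely whenever 4KB pages are already forced.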
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 8ebc3a5560ce..bf39d0f3efa9 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -744,9 +744,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 
 	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
-		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
-		if (!force_pt_level) {
-			level = min(walker.level, mapping_level(vcpu, walker.gfn));
+		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
+		if (likely(!force_pt_level)) {
+			level = min(walker.level, level);
 			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 		}
 	} else