author    Avi Kivity <avi@redhat.com>    2012-09-12 07:52:00 -0400
committer Avi Kivity <avi@redhat.com>    2012-09-20 06:00:08 -0400
commit    97d64b788114be1c4dc4bfe7a8ba2bf9643fe6af (patch)
tree      50dfed391cb52aba63cc41d0cdbdf07ee2d792e4 /arch/x86/kvm/x86.c
parent    8cbc70696f149e44753b0fe60162b4ff96c2dd2b (diff)
KVM: MMU: Optimize pte permission checks
walk_addr_generic() permission checks are a maze of branchy code, which is
performed four times per lookup. It depends on the type of access, efer.nxe,
cr0.wp, cr4.smep, and in the near future, cr4.smap.

Optimize this away by precalculating all variants and storing them in a
bitmap. The bitmap is recalculated when rarely-changing variables change
(cr0, cr4) and is indexed by the often-changing variables (page fault error
code, pte access permissions).

The permission check is moved to the end of the loop, otherwise an SMEP
fault could be reported as a false positive, when PDE.U=1 but PTE.U=0.
Noted by Xiao Guangrong.

The result is short, branch-free code.

Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
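To make the precalculation concrete, here is a minimal user-space sketch of the
same technique. It is a sketch only: the PFERR_*/ACC_* values and the struct mmu
below are simplified stand-ins for the kernel's definitions, and the real code
(permission_fault(), visible in the diff below, plus the bitmap update helper
this commit adds in arch/x86/kvm/mmu.c) covers more state. The shape is the
same: an outer loop over every page-fault error code, an inner loop over every
pte access combination, and a branch-free lookup on the hot path.

/*
 * Sketch of a precalculated permission bitmap, assuming simplified
 * PFERR_*/ACC_* bit layouts; not the kernel's actual definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Page-fault error-code bits relevant to the permission check. */
#define PFERR_WRITE_MASK	(1U << 1)
#define PFERR_USER_MASK		(1U << 2)
#define PFERR_FETCH_MASK	(1U << 4)

/* Accumulated pte access bits; together they form a 3-bit index. */
#define ACC_EXEC_MASK	1U
#define ACC_WRITE_MASK	2U
#define ACC_USER_MASK	4U

struct mmu {
	bool nx;	/* efer.nxe: no-execute enabled        */
	bool cr0_wp;	/* cr0.wp: supervisor write protection */
	bool smep;	/* cr4.smep                            */
	/*
	 * One byte per error-code combination; bit N of
	 * permissions[pfec >> 1] is 1 iff pte access N faults.
	 */
	uint8_t permissions[16];
};

/* Recomputed only when the rarely-changing registers (cr0, cr4) change. */
static void update_permission_bitmask(struct mmu *mmu)
{
	for (unsigned byte = 0; byte < 16; ++byte) {
		unsigned pfec = byte << 1;
		bool wf = pfec & PFERR_WRITE_MASK;
		bool uf = pfec & PFERR_USER_MASK;
		bool ff = pfec & PFERR_FETCH_MASK;
		uint8_t map = 0;

		for (unsigned bit = 0; bit < 8; ++bit) {
			bool x = bit & ACC_EXEC_MASK;
			bool w = bit & ACC_WRITE_MASK;
			bool u = bit & ACC_USER_MASK;

			x |= !mmu->nx;			/* !nx: everything executes    */
			w |= !mmu->cr0_wp && !uf;	/* !cr0.wp: supervisor writes  */
			x &= !(mmu->smep && u && !uf);	/* smep: no kernel fetch of    */
							/* user pages                  */

			bool fault = (ff && !x) || (wf && !w) || (uf && !u);
			map |= (uint8_t)fault << bit;
		}
		mmu->permissions[byte] = map;
	}
}

/* The hot-path check: a table load, a shift, and a mask. No branches. */
static inline bool permission_fault(const struct mmu *mmu,
				    unsigned pte_access, unsigned pfec)
{
	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
}

int main(void)
{
	struct mmu mmu = { .nx = true, .cr0_wp = true, .smep = true };

	update_permission_bitmask(&mmu);

	/* User write to a user-readable but non-writable page: faults (1). */
	printf("%d\n", permission_fault(&mmu, ACC_USER_MASK,
					PFERR_USER_MASK | PFERR_WRITE_MASK));
	/* Supervisor read of a supervisor page: allowed (0). */
	printf("%d\n", permission_fault(&mmu, 0, 0));
	return 0;
}

With this layout, each of the four per-lookup checks in walk_addr_generic()
collapses to the one-line permission_fault(), and all reasoning about cr0.wp,
cr4.smep, and efer.nxe happens once, when those registers change.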
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c  11
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 19047eafa38d..497226e49d4b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3672,20 +3672,17 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 			      gpa_t *gpa, struct x86_exception *exception,
 			      bool write)
 {
-	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
+		| (write ? PFERR_WRITE_MASK : 0);
 
-	if (vcpu_match_mmio_gva(vcpu, gva) &&
-	    check_write_user_access(vcpu, write, access,
-				    vcpu->arch.access)) {
+	if (vcpu_match_mmio_gva(vcpu, gva)
+	    && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
 			(gva & (PAGE_SIZE - 1));
 		trace_vcpu_match_mmio(gva, *gpa, write, false);
 		return 1;
 	}
 
-	if (write)
-		access |= PFERR_WRITE_MASK;
-
 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 
 	if (*gpa == UNMAPPED_GVA)