author    Avi Kivity <avi@redhat.com>    2012-09-12 07:52:00 -0400
committer Avi Kivity <avi@redhat.com>    2012-09-20 06:00:08 -0400
commit    97d64b788114be1c4dc4bfe7a8ba2bf9643fe6af (patch)
tree      50dfed391cb52aba63cc41d0cdbdf07ee2d792e4 /arch/x86/kvm/paging_tmpl.h
parent    8cbc70696f149e44753b0fe60162b4ff96c2dd2b (diff)
KVM: MMU: Optimize pte permission checks
The walk_addr_generic() permission checks are a maze of branchy code, executed up to four times per lookup. The outcome depends on the type of access, efer.nxe, cr0.wp, cr4.smep, and, in the near future, cr4.smap.

Optimize this away by precalculating all variants and storing them in a bitmap. The bitmap is recalculated when the rarely-changing variables change (cr0, cr4) and is indexed by the often-changing variables (page fault error code, pte access permissions).

The permission check is moved to the end of the loop; otherwise an SMEP fault could be reported as a false positive when PDE.U=1 but PTE.U=0. Noted by Xiao Guangrong.

The result is short, branch-free code.

Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
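[Editor's note] The bitmap itself is built in arch/x86/kvm/mmu.c, which is outside this page's diff (the diffstat below is limited to paging_tmpl.h). The following is therefore only a minimal, self-contained sketch of the scheme the commit message describes; the struct, constants, and helper names are illustrative, not the commit's exact code:

/*
 * Sketch of the precalculated permission bitmap. One byte is kept per
 * page-fault error code (pfec); bit N of that byte says whether a
 * translation whose accumulated pte access bits equal N faults under
 * that pfec. The table is rebuilt only when cr0.wp/cr4.smep/efer.nxe
 * change; the per-walk check is a single shift and mask.
 */
#include <stdbool.h>
#include <stdint.h>

#define PFEC_WRITE	(1u << 1)	/* fault was a write */
#define PFEC_USER	(1u << 2)	/* fault came from user mode */
#define PFEC_FETCH	(1u << 4)	/* fault was an instruction fetch */

#define ACC_EXEC	1u		/* gpte allows execute */
#define ACC_WRITE	2u		/* gpte allows write */
#define ACC_USER	4u		/* gpte allows user access */

struct mmu_perm {
	/* indexed by pfec >> 1; bit 0 ("present") carries no information */
	uint8_t permissions[16];
};

/* Recomputed only when the rarely-changing state changes (cr0, cr4, efer). */
static void update_permission_bitmask(struct mmu_perm *mmu,
				      bool nx, bool wp, bool smep)
{
	for (unsigned byte = 0; byte < 16; ++byte) {
		unsigned pfec = byte << 1;
		bool wf = pfec & PFEC_WRITE;
		bool uf = pfec & PFEC_USER;
		bool ff = pfec & PFEC_FETCH;
		uint8_t map = 0;

		for (unsigned bit = 0; bit < 8; ++bit) {
			bool x = bit & ACC_EXEC;
			bool w = bit & ACC_WRITE;
			bool u = bit & ACC_USER;

			/* without nx, every present page is executable */
			x |= !nx;
			/* !cr0.wp lets the kernel write read-only pages */
			w |= !wp && !uf;
			/* smep forbids kernel fetches from user pages */
			x &= !(smep && u && !uf);

			bool fault = (ff && !x) || (wf && !w) || (uf && !u);
			map |= (uint8_t)(fault << bit);
		}
		mmu->permissions[byte] = map;
	}
}

/* The per-lookup check: branch-free, one table byte, one shift. */
static bool permission_fault(const struct mmu_perm *mmu,
			     unsigned pte_access, unsigned pfec)
{
	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
}

int main(void)
{
	struct mmu_perm mmu;

	update_permission_bitmask(&mmu, true, true, true);

	/* kernel instruction fetch from a user-executable page trips SMEP */
	return permission_fault(&mmu, ACC_EXEC | ACC_USER, PFEC_FETCH) ? 0 : 1;
}

This is the shape of the change visible in the diff below: the per-level NX, SMEP, and write checks disappear from the walk, pte_access is accumulated instead, and a single permission_fault() call at the end of the loop decides eperm.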
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  |  22
1 file changed, 4 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 35a05dd2f69c..8f6c59fadbbe 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -169,7 +169,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	pt_element_t pte;
 	pt_element_t __user *uninitialized_var(ptep_user);
 	gfn_t table_gfn;
-	unsigned index, pt_access, uninitialized_var(pte_access);
+	unsigned index, pt_access, pte_access;
 	gpa_t pte_gpa;
 	bool eperm, last_gpte;
 	int offset;
@@ -237,24 +237,9 @@ retry_walk:
 			goto error;
 		}
 
-		if (!check_write_user_access(vcpu, write_fault, user_fault,
-					     pte))
-			eperm = true;
-
-#if PTTYPE == 64
-		if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
-			eperm = true;
-#endif
+		pte_access = pt_access & gpte_access(vcpu, pte);
 
 		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
-		if (last_gpte) {
-			pte_access = pt_access & gpte_access(vcpu, pte);
-			/* check if the kernel is fetching from user page */
-			if (unlikely(pte_access & PT_USER_MASK) &&
-			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
-				if (fetch_fault && !user_fault)
-					eperm = true;
-		}
 
 		walker->ptes[walker->level - 1] = pte;
 
@@ -284,10 +269,11 @@ retry_walk:
 			break;
 		}
 
-		pt_access &= gpte_access(vcpu, pte);
+		pt_access &= pte_access;
 		--walker->level;
 	}
 
+	eperm |= permission_fault(mmu, pte_access, access);
 	if (unlikely(eperm)) {
 		errcode |= PFERR_PRESENT_MASK;
 		goto error;