path: root/arch/x86/kvm/paging_tmpl.h
author     Avi Kivity <avi@redhat.com>    2011-04-24 05:25:50 -0400
committer  Avi Kivity <avi@redhat.com>    2011-05-22 08:39:46 -0400
commit     781e0743af3c5ba356d55bc60df59f2dded1e938 (patch)
tree       549ba079bf78ce53ee93852bd1ae36f810082bc0 /arch/x86/kvm/paging_tmpl.h
parent     62aaa2f05abd59598f132e6ebad86318291b5be0 (diff)
KVM: MMU: Add unlikely() annotations to walk_addr_generic()
walk_addr_generic() is a hot path and is also hard for the cpu to predict -
some of the parameters (fetch_fault in particular) vary wildly from
invocation to invocation.

Add unlikely() annotations where appropriate; all walk failures are
considered unlikely, as are cases where we have to mark the accessed or
dirty bit, as they are slow paths both in kvm and on real processors.

Signed-off-by: Avi Kivity <avi@redhat.com>
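For readers unfamiliar with the annotation, here is a minimal standalone sketch (not part of the patch) of how likely()/unlikely() are built on GCC's __builtin_expect() and how they are typically used on an error path; is_present() and walker_check() below are hypothetical stand-ins, not the real walker code.

/*
 * Minimal sketch, not part of the patch: the kernel defines likely()
 * and unlikely() in terms of __builtin_expect(), which tells the
 * compiler which way a branch is expected to go so the common case
 * stays on the straight-line, well-predicted path.
 */
#include <stdbool.h>
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical stand-in for a guest PTE presence test. */
static bool is_present(unsigned long pte)
{
	return pte & 1;
}

/* Hypothetical walker step: the failure branch is marked cold. */
static int walker_check(unsigned long pte)
{
	if (unlikely(!is_present(pte)))
		return -1;	/* walk failure: slow, out-of-line path */
	return 0;		/* common case: falls through untaken */
}

int main(void)
{
	printf("%d %d\n", walker_check(0x1UL), walker_check(0x0UL));
	return 0;
}

With annotations like these, GCC tends to move the unlikely blocks out of the hot instruction stream, which is the effect the patch is after in walk_addr_generic().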
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a32a1c809149..652d56c081f7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -172,49 +172,51 @@ walk:
 
 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
 					      PFERR_USER_MASK|PFERR_WRITE_MASK);
-		if (real_gfn == UNMAPPED_GVA) {
+		if (unlikely(real_gfn == UNMAPPED_GVA)) {
 			present = false;
 			break;
 		}
 		real_gfn = gpa_to_gfn(real_gfn);
 
 		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
-		if (kvm_is_error_hva(host_addr)) {
+		if (unlikely(kvm_is_error_hva(host_addr))) {
 			present = false;
 			break;
 		}
 
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-		if (get_user(pte, ptep_user)) {
+		if (unlikely(get_user(pte, ptep_user))) {
 			present = false;
 			break;
 		}
 
 		trace_kvm_mmu_paging_element(pte, walker->level);
 
-		if (!is_present_gpte(pte)) {
+		if (unlikely(!is_present_gpte(pte))) {
 			present = false;
 			break;
 		}
 
-		if (is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level)) {
+		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
+					      walker->level))) {
 			rsvd_fault = true;
 			break;
 		}
 
-		if (write_fault && !is_writable_pte(pte))
-			if (user_fault || is_write_protection(vcpu))
-				eperm = true;
+		if (unlikely(write_fault && !is_writable_pte(pte)
+			     && (user_fault || is_write_protection(vcpu))))
+			eperm = true;
 
-		if (user_fault && !(pte & PT_USER_MASK))
+		if (unlikely(user_fault && !(pte & PT_USER_MASK)))
 			eperm = true;
 
 #if PTTYPE == 64
-		if (fetch_fault && (pte & PT64_NX_MASK))
+		if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
 			eperm = true;
 #endif
 
-		if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
+		if (!eperm && !rsvd_fault
+		    && unlikely(!(pte & PT_ACCESSED_MASK))) {
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
@@ -270,10 +272,10 @@ walk:
 		--walker->level;
 	}
 
-	if (!present || eperm || rsvd_fault)
+	if (unlikely(!present || eperm || rsvd_fault))
 		goto error;
 
-	if (write_fault && !is_dirty_gpte(pte)) {
+	if (write_fault && unlikely(!is_dirty_gpte(pte))) {
 		int ret;
 
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));