aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>2012-10-16 08:10:12 -0400
committerAvi Kivity <avi@redhat.com>2012-10-17 10:39:19 -0400
commitbd6360cc0a545544b5e69ae4428ac031c4e43588 (patch)
treee00565bf3ed3e59d5a0ceac48f675ca2d5dd9f37 /arch/x86
parenta052b42b0e618f34ca891f00b4e8b8ac0e4b80c0 (diff)
KVM: MMU: introduce FNAME(prefetch_gpte)
The only difference between FNAME(update_pte) and FNAME(pte_prefetch) is that the former is allowed to prefetch gfn from dirty logged slot, so introduce a common function to prefetch spte. Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kvm/paging_tmpl.h55
1 file changed, 24 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 36a80edf8006..f887e4cfc1fe 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -305,31 +305,43 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
305 addr, access); 305 addr, access);
306} 306}
307 307
308static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 308static bool
309 u64 *spte, const void *pte) 309FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
310 u64 *spte, pt_element_t gpte, bool no_dirty_log)
310{ 311{
311 pt_element_t gpte;
312 unsigned pte_access; 312 unsigned pte_access;
313 gfn_t gfn;
313 pfn_t pfn; 314 pfn_t pfn;
314 315
315 gpte = *(const pt_element_t *)pte;
316 if (prefetch_invalid_gpte(vcpu, sp, spte, gpte)) 316 if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
317 return; 317 return false;
318 318
319 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); 319 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
320
321 gfn = gpte_to_gfn(gpte);
320 pte_access = sp->role.access & gpte_access(vcpu, gpte); 322 pte_access = sp->role.access & gpte_access(vcpu, gpte);
321 protect_clean_gpte(&pte_access, gpte); 323 protect_clean_gpte(&pte_access, gpte);
322 pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte)); 324 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
325 no_dirty_log && (pte_access & ACC_WRITE_MASK));
323 if (is_invalid_pfn(pfn)) 326 if (is_invalid_pfn(pfn))
324 return; 327 return false;
325 328
326 /* 329 /*
327 * we call mmu_set_spte() with host_writable = true because that 330 * we call mmu_set_spte() with host_writable = true because
328 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). 331 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
329 */ 332 */
330 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, 333 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
331 NULL, PT_PAGE_TABLE_LEVEL, 334 NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);
332 gpte_to_gfn(gpte), pfn, true, true); 335
336 return true;
337}
338
339static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
340 u64 *spte, const void *pte)
341{
342 pt_element_t gpte = *(const pt_element_t *)pte;
343
344 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
333} 345}
334 346
335static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, 347static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
@@ -375,33 +387,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
375 spte = sp->spt + i; 387 spte = sp->spt + i;
376 388
377 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { 389 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
378 pt_element_t gpte;
379 unsigned pte_access;
380 gfn_t gfn;
381 pfn_t pfn;
382
383 if (spte == sptep) 390 if (spte == sptep)
384 continue; 391 continue;
385 392
386 if (is_shadow_present_pte(*spte)) 393 if (is_shadow_present_pte(*spte))
387 continue; 394 continue;
388 395
389 gpte = gptep[i]; 396 if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
390
391 if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
392 continue;
393
394 pte_access = sp->role.access & gpte_access(vcpu, gpte);
395 protect_clean_gpte(&pte_access, gpte);
396 gfn = gpte_to_gfn(gpte);
397 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
398 pte_access & ACC_WRITE_MASK);
399 if (is_invalid_pfn(pfn))
400 break; 397 break;
401
402 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
403 NULL, PT_PAGE_TABLE_LEVEL, gfn,
404 pfn, true, true);
405 } 398 }
406} 399}
407 400