author	Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2012-06-20 03:58:33 -0400
committer	Avi Kivity <avi@redhat.com>	2012-07-11 09:51:18 -0400
commit	6e7d035407dc402a313e466c4f7ccb21aaed0da2 (patch)
tree	722d1991431fffe0d6d74f75ded56ff31b75a288
parent	4f5982a56a70a4a8b7966ef458d3fcdd27aa16cf (diff)
KVM: MMU: fold tlb flush judgement into mmu_spte_update
mmu_spte_update() is the common function, so we can easily audit the path there.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--	arch/x86/kvm/mmu.c	33
1 file changed, 20 insertions(+), 13 deletions(-)
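Before the diff itself, a minimal userspace sketch of the calling pattern the patch establishes: the spte-update helper reports whether a writable spte was overwritten by a read-only one, and the caller flushes remote TLBs based on that return value. The types and names used here (the u64 typedef, WRITABLE_BIT, is_writable, spte_update, flush_remote_tlbs) are simplified stand-ins for illustration, not the kernel's actual definitions.

/*
 * Illustrative sketch only -- simplified, out-of-tree stand-ins for the
 * kernel helpers touched by this patch; not the real KVM definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define WRITABLE_BIT (1ULL << 1)	/* stand-in for the spte writable flag */

static bool is_writable(u64 spte)
{
	return (spte & WRITABLE_BIT) != 0;
}

/*
 * Update the spte in place and tell the caller whether a remote TLB flush
 * is needed, i.e. whether a writable spte was overwritten by a read-only one.
 */
static bool spte_update(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	*sptep = new_spte;
	return is_writable(old_spte) && !is_writable(new_spte);
}

static void flush_remote_tlbs(void)
{
	puts("remote TLB flush requested");
}

int main(void)
{
	u64 spte = WRITABLE_BIT | 0x1000;

	/* The caller just acts on the return value, as set_spte() does below. */
	if (spte_update(&spte, 0x1000))		/* drop the writable bit */
		flush_remote_tlbs();

	return 0;
}

Folding the judgement into the update helper means every caller gets the flush decision from one audited place instead of re-deriving it, which is what the new set_pte hunk in the diff below relies on.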
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ed9e96806082..a2fc65ba76a5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -479,15 +479,24 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
 
 /* Rules for using mmu_spte_update:
  * Update the state bits, it means the mapped pfn is not changged.
+ *
+ * Whenever we overwrite a writable spte with a read-only one we
+ * should flush remote TLBs. Otherwise rmap_write_protect
+ * will find a read-only spte, even though the writable spte
+ * might be cached on a CPU's TLB, the return value indicates this
+ * case.
  */
-static void mmu_spte_update(u64 *sptep, u64 new_spte)
+static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 {
 	u64 mask, old_spte = *sptep;
+	bool ret = false;
 
 	WARN_ON(!is_rmap_spte(new_spte));
 
-	if (!is_shadow_present_pte(old_spte))
-		return mmu_spte_set(sptep, new_spte);
+	if (!is_shadow_present_pte(old_spte)) {
+		mmu_spte_set(sptep, new_spte);
+		return ret;
+	}
 
 	new_spte |= old_spte & shadow_dirty_mask;
 
@@ -500,13 +509,18 @@ static void mmu_spte_update(u64 *sptep, u64 new_spte)
 	else
 		old_spte = __update_clear_spte_slow(sptep, new_spte);
 
+	if (is_writable_pte(old_spte) && !is_writable_pte(new_spte))
+		ret = true;
+
 	if (!shadow_accessed_mask)
-		return;
+		return ret;
 
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
+
+	return ret;
 }
 
 /*
@@ -2268,7 +2282,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
-	u64 spte, entry = *sptep;
+	u64 spte;
 	int ret = 0;
 
 	if (set_mmio_spte(sptep, gfn, pfn, pte_access))
@@ -2346,14 +2360,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	mmu_spte_update(sptep, spte);
-	/*
-	 * If we overwrite a writable spte with a read-only one we
-	 * should flush remote TLBs. Otherwise rmap_write_protect
-	 * will find a read-only spte, even though the writable spte
-	 * might be cached on a CPU's TLB.
-	 */
-	if (is_writable_pte(entry) && !is_writable_pte(*sptep))
+	if (mmu_spte_update(sptep, spte))
 		kvm_flush_remote_tlbs(vcpu->kvm);
 done:
 	return ret;