author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>   2010-08-02 04:14:04 -0400
committer  Avi Kivity <avi@redhat.com>                     2010-10-24 04:50:31 -0400
commit     8672b7217a234c41d425a63b171af809e1169842 (patch)
tree       bc560e64b773b71010c77940382ad90f910fe734 /arch/x86/kvm/mmu.c
parent     251464c464cf7df7d6d548f1065f49a3ecd08118 (diff)
KVM: MMU: move bits lost judgement into a separate function
Introduce the spte_has_volatile_bits() function to judge whether spte
bits may be lost. It is more readable and will help us clean up the
code later.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
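
For background: the "volatile" bits are those the CPU can flip in an spte
behind the kernel's back, chiefly the Accessed bit set during a hardware
page walk. When KVM replaces such an spte, the value it read beforehand may
already be stale, so the update must go through __xchg_spte() to atomically
retrieve what was really there. The following is a minimal illustrative C
sketch of that race, not kernel code; all names in it (ACCESSED_BIT,
update_plain, update_xchg) are invented for illustration:

    /*
     * Illustrative sketch of why a plain store can lose a hardware-set
     * Accessed bit, while an atomic exchange cannot.
     */
    #include <stdatomic.h>
    #include <stdint.h>

    #define ACCESSED_BIT (1ULL << 5)    /* bit position chosen arbitrarily */

    static _Atomic uint64_t spte;       /* stand-in for a shadow PTE */

    /* Racy: read, then store. A hardware update in between is lost. */
    static uint64_t update_plain(uint64_t new_spte)
    {
            uint64_t old = atomic_load(&spte);
            /* <-- hardware could set ACCESSED_BIT in spte right here ... */
            atomic_store(&spte, new_spte);
            return old;     /* ... and 'old' would not reflect it */
    }

    /* Safe: the exchange returns exactly the value that was replaced. */
    static uint64_t update_xchg(uint64_t new_spte)
    {
            return atomic_exchange(&spte, new_spte);
    }

    int main(void)
    {
            atomic_store(&spte, 0x1000);    /* present entry, A bit clear */
            uint64_t old = update_xchg(0x2000);
            return (old & ACCESSED_BIT) ? 1 : 0;  /* propagate A bit if set */
    }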
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 -rw-r--r--  arch/x86/kvm/mmu.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e430a383ad15..c07b9a200bc8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -299,6 +299,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
 #endif
 }
 
+static bool spte_has_volatile_bits(u64 spte)
+{
+	if (!shadow_accessed_mask)
+		return false;
+
+	if (!is_shadow_present_pte(spte))
+		return false;
+
+	if (spte & shadow_accessed_mask)
+		return false;
+
+	return true;
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
 	u64 old_spte;
@@ -679,14 +693,14 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	pfn_t pfn;
 	u64 old_spte = *sptep;
 
-	if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
-	      old_spte & shadow_accessed_mask) {
+	if (!spte_has_volatile_bits(old_spte))
 		__set_spte(sptep, new_spte);
-	} else
+	else
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
 		return;
+
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
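
Reading the new helper, each early return marks a case where no spte bit can
change underneath KVM. An annotated restatement (the comments are one reading
of the logic, not part of the commit):

    static bool spte_has_volatile_bits(u64 spte)
    {
            if (!shadow_accessed_mask)          /* no hardware-managed A bit
                                                   on this setup */
                    return false;

            if (!is_shadow_present_pte(spte))   /* not present: hardware
                                                   never walks it */
                    return false;

            if (spte & shadow_accessed_mask)    /* A bit already set: nothing
                                                   left for hardware to flip */
                    return false;

            return true;    /* present with a clear A bit: hardware may set
                               it at any time, so the caller must use the
                               atomic-exchange path */
    }

This is why set_spte_track_bits() can take the cheaper __set_spte() path
whenever the helper returns false, and why its final check conservatively
calls kvm_set_pfn_accessed() when there is no hardware A bit at all.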