author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-03-27 22:29:27 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-11 07:57:03 -0400
commit	7c5625227ff8c81953e953d8e25c3eba2ab0aeb3 (patch)
tree	4592af4fbf9e9f95cdd063b5423afee24a1e6b90 /arch/x86/kvm/mmu.c
parent	a0c0ab2feb9d696978a7475dce4253ec62e98a16 (diff)
KVM: MMU: remove mmu_seq verification on pte update path
The mmu_seq verification can be removed since we get the pfn under the protection of mmu_lock.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
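For context: the check being removed is one instance of KVM's standard mmu_notifier race guard. When a pfn is resolved outside mmu_lock, the caller must sample kvm->mmu_notifier_seq first, then recheck it under the lock and retry if an invalidation ran in between. Below is a minimal sketch of that idiom, assuming this era's mmu_notifier_retry() and gfn_to_pfn() helpers; it is an illustrative caller, not the actual kvm_mmu_pte_write() code:

	unsigned long mmu_seq;
	pfn_t pfn;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;	/* sample before the pfn lookup */
	smp_rmb();				/* pairs with the notifier's write side */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* may sleep; mmu_lock not held */

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))	/* an invalidation raced us: bail out */
		goto out_unlock;
	/* ... install the spte using pfn ... */
	spin_unlock(&vcpu->kvm->mmu_lock);

On the pte-update path, however, the pfn lookup already happens with mmu_lock held, so no invalidation can slip in between the lookup and the spte update; the sequence check, and the mmu_seq argument threaded through ->update_pte(), are dead weight. The hunks below simply drop the parameter at each site.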
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 22fae7593ee7..28418054b880 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1206,7 +1206,7 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 
 static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp, u64 *spte,
-				 const void *pte, unsigned long mmu_seq)
+				 const void *pte)
 {
 	WARN_ON(1);
 }
@@ -3163,9 +3163,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *sp,
-				  u64 *spte,
-				  const void *new, unsigned long mmu_seq)
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  const void *new)
 {
 	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
 		++vcpu->kvm->stat.mmu_pde_zapped;
@@ -3173,7 +3172,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 	}
 
 	++vcpu->kvm->stat.mmu_pte_updated;
-	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new, mmu_seq);
+	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -3229,7 +3228,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
-	unsigned long mmu_seq;
 	u64 entry, gentry, *spte;
 	unsigned pte_size, page_offset, misaligned, quadrant, offset;
 	int level, npte, invlpg_counter, r, flooded = 0;
@@ -3271,9 +3269,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		break;
 	}
 
-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	smp_rmb();
-
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
 		gentry = 0;
@@ -3345,8 +3340,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		if (gentry &&
 		    !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
 		      & mask.word))
-			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry,
-					      mmu_seq);
+			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 		if (!remote_flush && need_remote_flush(entry, *spte))
 			remote_flush = true;
 		++spte;