author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-03-27 22:29:27 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-11 07:57:03 -0400
commit	7c5625227ff8c81953e953d8e25c3eba2ab0aeb3 (patch)
tree	4592af4fbf9e9f95cdd063b5423afee24a1e6b90
parent	a0c0ab2feb9d696978a7475dce4253ec62e98a16 (diff)
KVM: MMU: remove mmu_seq verification on pte update path
The mmu_seq verification can be removed, since we now get the pfn
under the protection of mmu_lock.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
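
For background: the check deleted below follows KVM's standard mmu_notifier
retry protocol, which is only needed when the gfn -> pfn translation happens
outside mmu_lock. A minimal sketch of that protocol, modeled on the
page-fault paths of this era (simplified; not code from this patch):

	unsigned long mmu_seq;
	pfn_t pfn;

	/* Sample the notifier sequence before translating gfn -> pfn. */
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	/* May sleep; mmu_lock is NOT held, so an invalidation can race. */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	/*
	 * If an mmu notifier invalidation ran after mmu_seq was sampled,
	 * pfn may refer to a page that has since been freed: bail out
	 * and let the fault be retried.
	 */
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	/* ... install the spte using pfn ... */
out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);

On the pte-update path the pfn is now looked up (via gfn_to_pfn_atomic())
while mmu_lock is already held, and the invalidation side also takes
mmu_lock, so the race the check guards against can no longer occur.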
-rw-r--r--	arch/x86/include/asm/kvm_host.h	|  2 +-
-rw-r--r--	arch/x86/kvm/mmu.c	| 16 +++++-----------
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	|  4 +---
3 files changed, 7 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f7dfd6479d02..ecdc562ea3e2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -274,7 +274,7 @@ struct kvm_mmu {
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			   u64 *spte, const void *pte, unsigned long mmu_seq);
+			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
 	int root_level;
 	int shadow_root_level;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 22fae7593ee7..28418054b880 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1206,7 +1206,7 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 
 static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp, u64 *spte,
-				 const void *pte, unsigned long mmu_seq)
+				 const void *pte)
 {
 	WARN_ON(1);
 }
@@ -3163,9 +3163,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *sp,
-				  u64 *spte,
-				  const void *new, unsigned long mmu_seq)
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  const void *new)
 {
 	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
 		++vcpu->kvm->stat.mmu_pde_zapped;
@@ -3173,7 +3172,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 	}
 
 	++vcpu->kvm->stat.mmu_pte_updated;
-	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new, mmu_seq);
+	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -3229,7 +3228,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
-	unsigned long mmu_seq;
 	u64 entry, gentry, *spte;
 	unsigned pte_size, page_offset, misaligned, quadrant, offset;
 	int level, npte, invlpg_counter, r, flooded = 0;
@@ -3271,9 +3269,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		break;
 	}
 
-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	smp_rmb();
-
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
 		gentry = 0;
@@ -3345,8 +3340,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		if (gentry &&
 		    !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
 		      & mask.word))
-			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry,
-					      mmu_seq);
+			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 		if (!remote_flush && need_remote_flush(entry, *spte))
 			remote_flush = true;
 		++spte;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c6397795d865..74f8567d57ac 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -325,7 +325,7 @@ no_present:
 }
 
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			      u64 *spte, const void *pte, unsigned long mmu_seq)
+			      u64 *spte, const void *pte)
 {
 	pt_element_t gpte;
 	unsigned pte_access;
@@ -342,8 +342,6 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		kvm_release_pfn_clean(pfn);
 		return;
 	}
-	if (mmu_notifier_retry(vcpu, mmu_seq))
-		return;
 
 	/*
 	 * we call mmu_set_spte() with host_writable = true because that
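
The counterpart that makes this removal safe is the invalidation side,
which bumps mmu_notifier_seq and zaps sptes under the same mmu_lock. A
simplified sketch of kvm_mmu_notifier_invalidate_page() from
virt/kvm/kvm_main.c of this era (SRCU locking omitted):

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	spin_lock(&kvm->mmu_lock);
	/*
	 * Bumping the sequence under mmu_lock is what lets
	 * mmu_notifier_retry() catch pfn lookups done outside the lock.
	 * A lookup done while holding mmu_lock cannot interleave with
	 * this critical section at all, so it needs no retry check.
	 */
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* The TLB must be flushed before the page can be freed. */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}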