author     Avi Kivity <avi@redhat.com>     2010-07-13 07:27:08 -0400
committer  Avi Kivity <avi@redhat.com>     2010-08-01 23:40:44 -0400
commit     39c8c672a18c52048343d7531dfb2dcf3431ee74
tree       1942be7baebeb80a3a4875e1eb457b70e8c6c80b  /arch/x86/kvm/paging_tmpl.h
parent     a357bd229cdaf37a41798d238ab50b34c71dd0d6
KVM: MMU: Add gpte_changed() helper
Move the code to check whether a gpte has changed since we fetched it
into a helper.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 0c7461d3a5be..e1c1f9eb1cc1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -299,6 +299,17 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		     gpte_to_gfn(gpte), pfn, true, true);
 }
 
+static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
+				struct guest_walker *gw, int level)
+{
+	int r;
+	pt_element_t curr_pte;
+
+	r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 1],
+				  &curr_pte, sizeof(curr_pte));
+	return r || curr_pte != gw->ptes[level - 1];
+}
+
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
@@ -312,11 +323,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	u64 *sptep = NULL;
 	int direct;
 	gfn_t table_gfn;
-	int r;
 	int level;
 	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
 	unsigned direct_access;
-	pt_element_t curr_pte;
 	struct kvm_shadow_walk_iterator iterator;
 
 	if (!is_present_gpte(gw->ptes[gw->level - 1]))
@@ -365,17 +374,17 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		}
 		sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 				      direct, access, sptep);
-		if (!direct) {
-			r = kvm_read_guest_atomic(vcpu->kvm,
-						  gw->pte_gpa[level - 2],
-						  &curr_pte, sizeof(curr_pte));
-			if (r || curr_pte != gw->ptes[level - 2]) {
+		if (!direct)
+			/*
+			 * Verify that the gpte in the page we've just write
+			 * protected is still there.
+			 */
+			if (FNAME(gpte_changed)(vcpu, gw, level - 1)) {
 				kvm_mmu_put_page(sp, sptep);
 				kvm_release_pfn_clean(pfn);
 				sptep = NULL;
 				break;
 			}
-		}
 
 		link_shadow_page(sptep, sp);
 	}
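
The helper's logic is simply "re-read the guest pte after the shadow page has been write protected and compare it with the snapshot taken during the guest walk, treating a failed read the same as a mismatch". Below is a minimal user-space C sketch of that read-back-and-compare pattern, for illustration only: it is not kernel code, and read_guest_pte() is a made-up stand-in for kvm_read_guest_atomic(), not a real KVM API.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pt_element_t;

	/* Stand-in for kvm_read_guest_atomic(): returns nonzero on failure. */
	static int read_guest_pte(const pt_element_t *guest_pte, pt_element_t *out)
	{
		if (!guest_pte)
			return -1;
		*out = *guest_pte;
		return 0;
	}

	/*
	 * Same shape as FNAME(gpte_changed): a failed read or a value that no
	 * longer matches the cached snapshot both count as "changed".
	 */
	static bool gpte_changed(const pt_element_t *guest_pte, pt_element_t cached)
	{
		pt_element_t curr_pte;
		int r = read_guest_pte(guest_pte, &curr_pte);

		return r || curr_pte != cached;
	}

	int main(void)
	{
		pt_element_t guest_pte = 0x1234;
		pt_element_t cached = guest_pte;	/* snapshot taken during the walk */

		printf("unchanged: %d\n", gpte_changed(&guest_pte, cached));	/* 0 */
		guest_pte = 0x5678;			/* guest rewrote its pte meanwhile */
		printf("changed:   %d\n", gpte_changed(&guest_pte, cached));	/* 1 */
		return 0;
	}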