diff options
author | Marcelo Tosatti <mtosatti@redhat.com> | 2007-12-20 19:18:23 -0500 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2008-01-30 11:01:20 -0500 |
commit | 7ec54588210df29ea637e6054489bc942c0ef371 (patch) | |
tree | a21f672f29965a7fa35d90b3c3fe034150ae9ec7 /arch/x86 | |
parent | 10589a4699bb978c781ce73bbae8ca942c5250c9 (diff) |
KVM: Add kvm_read_guest_atomic()
In preparation for a mmu spinlock, add kvm_read_guest_atomic()
and use it in fetch() and prefetch_page().
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 28 |
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 7f83f5557d5e..136a65d72b0a 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -316,10 +316,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
316 | metaphysical, access, | 316 | metaphysical, access, |
317 | shadow_ent, &new_page); | 317 | shadow_ent, &new_page); |
318 | if (new_page && !metaphysical) { | 318 | if (new_page && !metaphysical) { |
319 | int r; | ||
319 | pt_element_t curr_pte; | 320 | pt_element_t curr_pte; |
320 | kvm_read_guest(vcpu->kvm, walker->pte_gpa[level - 2], | 321 | r = kvm_read_guest_atomic(vcpu->kvm, |
321 | &curr_pte, sizeof(curr_pte)); | 322 | walker->pte_gpa[level - 2], |
322 | if (curr_pte != walker->ptes[level - 2]) | 323 | &curr_pte, sizeof(curr_pte)); |
324 | if (r || curr_pte != walker->ptes[level - 2]) | ||
323 | return NULL; | 325 | return NULL; |
324 | } | 326 | } |
325 | shadow_addr = __pa(shadow_page->spt); | 327 | shadow_addr = __pa(shadow_page->spt); |
@@ -429,9 +431,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) | |||
429 | static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, | 431 | static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, |
430 | struct kvm_mmu_page *sp) | 432 | struct kvm_mmu_page *sp) |
431 | { | 433 | { |
432 | int i, offset = 0; | 434 | int i, offset = 0, r = 0; |
433 | pt_element_t *gpt; | 435 | pt_element_t pt; |
434 | struct page *page; | ||
435 | 436 | ||
436 | if (sp->role.metaphysical | 437 | if (sp->role.metaphysical |
437 | || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) { | 438 | || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) { |
@@ -441,15 +442,18 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, | |||
441 | 442 | ||
442 | if (PTTYPE == 32) | 443 | if (PTTYPE == 32) |
443 | offset = sp->role.quadrant << PT64_LEVEL_BITS; | 444 | offset = sp->role.quadrant << PT64_LEVEL_BITS; |
444 | page = gfn_to_page(vcpu->kvm, sp->gfn); | 445 | |
445 | gpt = kmap_atomic(page, KM_USER0); | 446 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { |
446 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) | 447 | gpa_t pte_gpa = gfn_to_gpa(sp->gfn); |
447 | if (is_present_pte(gpt[offset + i])) | 448 | pte_gpa += (i+offset) * sizeof(pt_element_t); |
449 | |||
450 | r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt, | ||
451 | sizeof(pt_element_t)); | ||
452 | if (r || is_present_pte(pt)) | ||
448 | sp->spt[i] = shadow_trap_nonpresent_pte; | 453 | sp->spt[i] = shadow_trap_nonpresent_pte; |
449 | else | 454 | else |
450 | sp->spt[i] = shadow_notrap_nonpresent_pte; | 455 | sp->spt[i] = shadow_notrap_nonpresent_pte; |
451 | kunmap_atomic(gpt, KM_USER0); | 456 | } |
452 | kvm_release_page_clean(page); | ||
453 | } | 457 | } |
454 | 458 | ||
455 | #undef pt_element_t | 459 | #undef pt_element_t |