-rw-r--r--  arch/x86/kvm/paging_tmpl.h  | 28
-rw-r--r--  include/linux/kvm_host.h    |  2
-rw-r--r--  virt/kvm/kvm_main.c         | 20
3 files changed, 38 insertions, 12 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7f83f5557d5e..136a65d72b0a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -316,10 +316,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 					       metaphysical, access,
 					       shadow_ent, &new_page);
 		if (new_page && !metaphysical) {
+			int r;
 			pt_element_t curr_pte;
-			kvm_read_guest(vcpu->kvm, walker->pte_gpa[level - 2],
-				       &curr_pte, sizeof(curr_pte));
-			if (curr_pte != walker->ptes[level - 2])
+			r = kvm_read_guest_atomic(vcpu->kvm,
+						  walker->pte_gpa[level - 2],
+						  &curr_pte, sizeof(curr_pte));
+			if (r || curr_pte != walker->ptes[level - 2])
 				return NULL;
 		}
 		shadow_addr = __pa(shadow_page->spt);
@@ -429,9 +431,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp)
 {
-	int i, offset = 0;
-	pt_element_t *gpt;
-	struct page *page;
+	int i, offset = 0, r = 0;
+	pt_element_t pt;
 
 	if (sp->role.metaphysical
 	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
@@ -441,15 +442,18 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 
 	if (PTTYPE == 32)
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;
-	page = gfn_to_page(vcpu->kvm, sp->gfn);
-	gpt = kmap_atomic(page, KM_USER0);
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-		if (is_present_pte(gpt[offset + i]))
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
+		pte_gpa += (i+offset) * sizeof(pt_element_t);
+
+		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
+					  sizeof(pt_element_t));
+		if (r || is_present_pte(pt))
 			sp->spt[i] = shadow_trap_nonpresent_pte;
 		else
 			sp->spt[i] = shadow_notrap_nonpresent_pte;
-	kunmap_atomic(gpt, KM_USER0);
-	kvm_release_page_clean(page);
+	}
 }
 
 #undef pt_element_t
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9ff5904c5072..a020fb280540 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -167,6 +167,8 @@ void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+			  unsigned long len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 			 int offset, int len);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 678e80561b74..8d0b7c16c2f7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -541,6 +541,26 @@ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest);
 
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+			  unsigned long len)
+{
+	int r;
+	unsigned long addr;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int offset = offset_in_page(gpa);
+
+	addr = gfn_to_hva(kvm, gfn);
+	if (kvm_is_error_hva(addr))
+		return -EFAULT;
+	pagefault_disable();
+	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
+	pagefault_enable();
+	if (r)
+		return -EFAULT;
+	return 0;
+}
+EXPORT_SYMBOL(kvm_read_guest_atomic);
+
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 			 int offset, int len)
 {
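
For reference, the pattern both paging_tmpl.h hunks rely on is: re-read the guest PTE with the new non-sleeping helper and treat a read failure the same as a changed entry, so the caller bails out rather than acting on possibly stale data while in atomic context. A minimal sketch of that caller pattern, assuming only the kvm_read_guest_atomic() prototype added above; the guest_pte_matches() helper and the u64 stand-in for pt_element_t are illustrative and not part of the patch:

#include <linux/kvm_host.h>

/*
 * Illustrative helper (not in the patch): returns true only if the guest
 * PTE at @pte_gpa can be re-read without faulting and still equals
 * @cached_pte.  Usable under a spinlock because kvm_read_guest_atomic()
 * never sleeps; it returns -EFAULT instead of faulting the page in.
 */
static bool guest_pte_matches(struct kvm *kvm, gpa_t pte_gpa, u64 cached_pte)
{
	u64 curr_pte;
	int r;

	r = kvm_read_guest_atomic(kvm, pte_gpa, &curr_pte, sizeof(curr_pte));
	return !r && curr_pte == cached_pte;
}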