about summary refs log tree commit diff stats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c  26
1 files changed, 14 insertions, 12 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3101060033a..bbd9f4af444 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -418,17 +418,17 @@ int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
418/* 418/*
419 * Load the pae pdptrs. Return true if they are all valid. 419 * Load the pae pdptrs. Return true if they are all valid.
420 */ 420 */
421int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) 421int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
422{ 422{
423 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; 423 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
424 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; 424 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
425 int i; 425 int i;
426 int ret; 426 int ret;
427 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; 427 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
428 428
429 ret = kvm_read_nested_guest_page(vcpu, pdpt_gfn, pdpte, 429 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
430 offset * sizeof(u64), sizeof(pdpte), 430 offset * sizeof(u64), sizeof(pdpte),
431 PFERR_USER_MASK|PFERR_WRITE_MASK); 431 PFERR_USER_MASK|PFERR_WRITE_MASK);
432 if (ret < 0) { 432 if (ret < 0) {
433 ret = 0; 433 ret = 0;
434 goto out; 434 goto out;
@@ -442,7 +442,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
442 } 442 }
443 ret = 1; 443 ret = 1;
444 444
445 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs)); 445 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
446 __set_bit(VCPU_EXREG_PDPTR, 446 __set_bit(VCPU_EXREG_PDPTR,
447 (unsigned long *)&vcpu->arch.regs_avail); 447 (unsigned long *)&vcpu->arch.regs_avail);
448 __set_bit(VCPU_EXREG_PDPTR, 448 __set_bit(VCPU_EXREG_PDPTR,
@@ -455,7 +455,7 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
455 455
456static bool pdptrs_changed(struct kvm_vcpu *vcpu) 456static bool pdptrs_changed(struct kvm_vcpu *vcpu)
457{ 457{
458 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; 458 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
459 bool changed = true; 459 bool changed = true;
460 int offset; 460 int offset;
461 gfn_t gfn; 461 gfn_t gfn;
@@ -474,7 +474,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
474 PFERR_USER_MASK | PFERR_WRITE_MASK); 474 PFERR_USER_MASK | PFERR_WRITE_MASK);
475 if (r < 0) 475 if (r < 0)
476 goto out; 476 goto out;
477 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0; 477 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
478out: 478out:
479 479
480 return changed; 480 return changed;
@@ -513,7 +513,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
513 return 1; 513 return 1;
514 } else 514 } else
515#endif 515#endif
516 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) 516 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
517 vcpu->arch.cr3))
517 return 1; 518 return 1;
518 } 519 }
519 520
@@ -602,7 +603,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
602 return 1; 603 return 1;
603 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 604 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
604 && ((cr4 ^ old_cr4) & pdptr_bits) 605 && ((cr4 ^ old_cr4) & pdptr_bits)
605 && !load_pdptrs(vcpu, vcpu->arch.cr3)) 606 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
606 return 1; 607 return 1;
607 608
608 if (cr4 & X86_CR4_VMXE) 609 if (cr4 & X86_CR4_VMXE)
@@ -635,7 +636,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
635 if (is_pae(vcpu)) { 636 if (is_pae(vcpu)) {
636 if (cr3 & CR3_PAE_RESERVED_BITS) 637 if (cr3 & CR3_PAE_RESERVED_BITS)
637 return 1; 638 return 1;
638 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) 639 if (is_paging(vcpu) &&
640 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
639 return 1; 641 return 1;
640 } 642 }
641 /* 643 /*
@@ -5422,7 +5424,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5422 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 5424 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5423 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 5425 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5424 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 5426 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5425 load_pdptrs(vcpu, vcpu->arch.cr3); 5427 load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
5426 mmu_reset_needed = 1; 5428 mmu_reset_needed = 1;
5427 } 5429 }
5428 5430