about | summary | refs | log | tree | commit | diff | stats
path: root/arch/x86
diff options: context, space, mode
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/kvm_host.h5
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h2
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/kvm/vmx.c16
-rw-r--r--arch/x86/kvm/x86.c26
5 files changed, 27 insertions, 24 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 574db6d1532..9e70de37654 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -257,6 +257,8 @@ struct kvm_mmu {
257 257
258 u64 *pae_root; 258 u64 *pae_root;
259 u64 rsvd_bits_mask[2][4]; 259 u64 rsvd_bits_mask[2][4];
260
261 u64 pdptrs[4]; /* pae */
260}; 262};
261 263
262struct kvm_vcpu_arch { 264struct kvm_vcpu_arch {
@@ -276,7 +278,6 @@ struct kvm_vcpu_arch {
276 unsigned long cr4_guest_owned_bits; 278 unsigned long cr4_guest_owned_bits;
277 unsigned long cr8; 279 unsigned long cr8;
278 u32 hflags; 280 u32 hflags;
279 u64 pdptrs[4]; /* pae */
280 u64 efer; 281 u64 efer;
281 u64 apic_base; 282 u64 apic_base;
282 struct kvm_lapic *apic; /* kernel irqchip context */ 283 struct kvm_lapic *apic; /* kernel irqchip context */
@@ -592,7 +593,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
592unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 593unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
593void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 594void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
594 595
595int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 596int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
596 597
597int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 598int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
598 const void *val, int bytes); 599 const void *val, int bytes);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 6491ac8e755..a37abe2ec39 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -42,7 +42,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
42 (unsigned long *)&vcpu->arch.regs_avail)) 42 (unsigned long *)&vcpu->arch.regs_avail))
43 kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR); 43 kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
44 44
45 return vcpu->arch.pdptrs[index]; 45 return vcpu->arch.walk_mmu->pdptrs[index];
46} 46}
47 47
48static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) 48static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 53c9039583f..ca711cb27a1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1010,7 +1010,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1010 switch (reg) { 1010 switch (reg) {
1011 case VCPU_EXREG_PDPTR: 1011 case VCPU_EXREG_PDPTR:
1012 BUG_ON(!npt_enabled); 1012 BUG_ON(!npt_enabled);
1013 load_pdptrs(vcpu, vcpu->arch.cr3); 1013 load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
1014 break; 1014 break;
1015 default: 1015 default:
1016 BUG(); 1016 BUG();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ff7a8d48fd2..1a7691a8717 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1842,20 +1842,20 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
1842 return; 1842 return;
1843 1843
1844 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 1844 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1845 vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]); 1845 vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
1846 vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]); 1846 vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
1847 vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]); 1847 vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
1848 vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]); 1848 vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
1849 } 1849 }
1850} 1850}
1851 1851
1852static void ept_save_pdptrs(struct kvm_vcpu *vcpu) 1852static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
1853{ 1853{
1854 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 1854 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1855 vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 1855 vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
1856 vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 1856 vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
1857 vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 1857 vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
1858 vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 1858 vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
1859 } 1859 }
1860 1860
1861 __set_bit(VCPU_EXREG_PDPTR, 1861 __set_bit(VCPU_EXREG_PDPTR,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3101060033a..bbd9f4af444 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -418,17 +418,17 @@ int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
418/* 418/*
419 * Load the pae pdptrs. Return true is they are all valid. 419 * Load the pae pdptrs. Return true is they are all valid.
420 */ 420 */
421int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) 421int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
422{ 422{
423 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; 423 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
424 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; 424 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
425 int i; 425 int i;
426 int ret; 426 int ret;
427 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; 427 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
428 428
429 ret = kvm_read_nested_guest_page(vcpu, pdpt_gfn, pdpte, 429 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
430 offset * sizeof(u64), sizeof(pdpte), 430 offset * sizeof(u64), sizeof(pdpte),
431 PFERR_USER_MASK|PFERR_WRITE_MASK); 431 PFERR_USER_MASK|PFERR_WRITE_MASK);
432 if (ret < 0) { 432 if (ret < 0) {
433 ret = 0; 433 ret = 0;
434 goto out; 434 goto out;
@@ -442,7 +442,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
442 } 442 }
443 ret = 1; 443 ret = 1;
444 444
445 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs)); 445 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
446 __set_bit(VCPU_EXREG_PDPTR, 446 __set_bit(VCPU_EXREG_PDPTR,
447 (unsigned long *)&vcpu->arch.regs_avail); 447 (unsigned long *)&vcpu->arch.regs_avail);
448 __set_bit(VCPU_EXREG_PDPTR, 448 __set_bit(VCPU_EXREG_PDPTR,
@@ -455,7 +455,7 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
455 455
456static bool pdptrs_changed(struct kvm_vcpu *vcpu) 456static bool pdptrs_changed(struct kvm_vcpu *vcpu)
457{ 457{
458 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; 458 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
459 bool changed = true; 459 bool changed = true;
460 int offset; 460 int offset;
461 gfn_t gfn; 461 gfn_t gfn;
@@ -474,7 +474,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
474 PFERR_USER_MASK | PFERR_WRITE_MASK); 474 PFERR_USER_MASK | PFERR_WRITE_MASK);
475 if (r < 0) 475 if (r < 0)
476 goto out; 476 goto out;
477 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0; 477 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
478out: 478out:
479 479
480 return changed; 480 return changed;
@@ -513,7 +513,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
513 return 1; 513 return 1;
514 } else 514 } else
515#endif 515#endif
516 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) 516 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
517 vcpu->arch.cr3))
517 return 1; 518 return 1;
518 } 519 }
519 520
@@ -602,7 +603,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
602 return 1; 603 return 1;
603 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 604 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
604 && ((cr4 ^ old_cr4) & pdptr_bits) 605 && ((cr4 ^ old_cr4) & pdptr_bits)
605 && !load_pdptrs(vcpu, vcpu->arch.cr3)) 606 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
606 return 1; 607 return 1;
607 608
608 if (cr4 & X86_CR4_VMXE) 609 if (cr4 & X86_CR4_VMXE)
@@ -635,7 +636,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
635 if (is_pae(vcpu)) { 636 if (is_pae(vcpu)) {
636 if (cr3 & CR3_PAE_RESERVED_BITS) 637 if (cr3 & CR3_PAE_RESERVED_BITS)
637 return 1; 638 return 1;
638 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) 639 if (is_paging(vcpu) &&
640 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
639 return 1; 641 return 1;
640 } 642 }
641 /* 643 /*
@@ -5422,7 +5424,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5422 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 5424 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5423 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 5425 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5424 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 5426 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5425 load_pdptrs(vcpu, vcpu->arch.cr3); 5427 load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
5426 mmu_reset_needed = 1; 5428 mmu_reset_needed = 1;
5427 } 5429 }
5428 5430