author    Joerg Roedel <joerg.roedel@amd.com>      2008-02-07 07:47:45 -0500
committer Avi Kivity <avi@qumranet.com>            2008-04-27 04:53:21 -0400
commit    709ddebf81cb40e3c36c6109a7892e8b93a09464 (patch)
tree      e7b275fde77ed005c60182e093875c13656ec43a /arch
parent    fb72d1674d860b0c9ef9b66b7f4f01fe5b3d2c00 (diff)
KVM: SVM: add support for Nested Paging
This patch contains the SVM architecture-dependent changes needed for KVM to
support the Nested Paging feature of AMD Barcelona and Phenom processors.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
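
For context: Nested Paging adds a second, hardware-walked translation stage, so with it enabled KVM no longer needs to shadow the guest page tables or intercept #PF and CR3 accesses for that purpose. Whether a CPU offers the feature is reported in the SVM feature identification leaf. A minimal host-side sketch, not part of the patch, assuming a GCC-style compiler on x86 and that CPUID leaf 0x8000000A reports nested paging in EDX bit 0 (consistent with the SVM_FEATURE_* bits visible in the first hunk below):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID Fn8000_000Ah: SVM feature identification.
             * EDX bit 0 is NP (nested paging) -- assumed here. */
            if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx)) {
                    puts("SVM feature leaf not available");
                    return 1;
            }
            printf("nested paging: %ssupported\n", (edx & 1) ? "" : "not ");
            return 0;
    }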
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/kvm/svm.c | 72
 1 file changed, 67 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9e29a13136c4..8e9d4a5dacda 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -47,7 +47,12 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_DEATURE_SVML (1 << 2)
 
+/* enable NPT for AMD64 and X86 with PAE */
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+static bool npt_enabled = true;
+#else
 static bool npt_enabled = false;
+#endif
 static int npt = 1;
 
 module_param(npt, int, S_IRUGO);
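
Note that the compile-time #if above only picks the default for npt_enabled on hosts that can actually use it (64-bit or PAE); the npt module parameter visible in the context lines remains the runtime switch, so, assuming the parameter is wired up as its name suggests, nested paging can still be disabled at load time with modprobe kvm-amd npt=0.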
@@ -187,7 +192,7 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
 
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	if (!(efer & EFER_LMA))
+	if (!npt_enabled && !(efer & EFER_LMA))
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
@@ -573,6 +578,22 @@ static void init_vmcb(struct vmcb *vmcb)
 	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
 	save->cr4 = X86_CR4_PAE;
 	/* rdx = ?? */
+
+	if (npt_enabled) {
+		/* Setup VMCB for Nested Paging */
+		control->nested_ctl = 1;
+		control->intercept_exceptions &= ~(1 << PF_VECTOR);
+		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
+						INTERCEPT_CR3_MASK);
+		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
+						 INTERCEPT_CR3_MASK);
+		save->g_pat = 0x0007040600070406ULL;
+		/* enable caching because the QEMU Bios doesn't enable it */
+		save->cr0 = X86_CR0_ET;
+		save->cr3 = 0;
+		save->cr4 = 0;
+	}
+
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
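
The g_pat value programmed in this hunk looks opaque but is just the architectural power-on default PAT. A standalone sketch, not part of the patch, that decodes it under the standard x86 PAT encodings (0 = UC, 1 = WC, 4 = WT, 5 = WP, 6 = WB, 7 = UC-):

    #include <stdio.h>

    int main(void)
    {
            const unsigned long long g_pat = 0x0007040600070406ULL;
            static const char *type[8] = {
                    "UC", "WC", "rsvd", "rsvd", "WT", "WP", "WB", "UC-"
            };
            int i;

            /* one PAT entry per byte; only the low three bits matter */
            for (i = 0; i < 8; i++)
                    printf("PA%d = %s\n", i, type[(g_pat >> (i * 8)) & 0x7]);
            return 0;
    }

This prints WB, WT, UC-, UC twice over, i.e. the reset-default memory types, which fits the comment about enabling caching because the QEMU BIOS does not.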
@@ -807,6 +828,9 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		}
 	}
 #endif
+	if (npt_enabled)
+		goto set;
+
 	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
@@ -814,18 +838,26 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	vcpu->arch.cr0 = cr0;
 	cr0 |= X86_CR0_PG | X86_CR0_WP;
-	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	if (!vcpu->fpu_active) {
 		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
 		cr0 |= X86_CR0_TS;
 	}
+set:
+	/*
+	 * re-enable caching here because the QEMU bios
+	 * does not do it - this results in some delay at
+	 * reboot
+	 */
+	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	vcpu->arch.cr4 = cr4;
-	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
+	if (!npt_enabled)
+		cr4 |= X86_CR4_PAE;
+	to_svm(vcpu)->vmcb->save.cr4 = cr4;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
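
A note on the CR4 change: without NPT, KVM's shadow page tables are built in PAE format, so CR4.PAE has to be forced on in hardware regardless of what the guest configured; with NPT the guest runs on its own paging mode and its CR4 can be installed as-is. That is, at least, the usual reading of this hunk, since the commit message itself does not spell it out.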
@@ -1313,14 +1345,34 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_WBINVD]	= emulate_on_interception,
 	[SVM_EXIT_MONITOR]	= invalid_op_interception,
 	[SVM_EXIT_MWAIT]	= invalid_op_interception,
+	[SVM_EXIT_NPF]		= pf_interception,
 };
 
-
 static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 exit_code = svm->vmcb->control.exit_code;
 
+	if (npt_enabled) {
+		int mmu_reload = 0;
+		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
+			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
+			mmu_reload = 1;
+		}
+		vcpu->arch.cr0 = svm->vmcb->save.cr0;
+		vcpu->arch.cr3 = svm->vmcb->save.cr3;
+		if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
+			if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
+				kvm_inject_gp(vcpu, 0);
+				return 1;
+			}
+		}
+		if (mmu_reload) {
+			kvm_mmu_reset_context(vcpu);
+			kvm_mmu_load(vcpu);
+		}
+	}
+
 	kvm_reput_irq(svm);
 
 	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
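
Because the CR0/CR3 intercepts are disabled under NPT, the guest can change those registers without causing an exit; handle_exit therefore resyncs KVM's cached copies from the VMCB on every exit, reloading the MMU context when the guest toggled CR0.PG and refreshing the PDPTRs in PAE mode. The XOR test on the first line of the block is a compact changed-bit check; a toy illustration with hypothetical values, not taken from the patch:

    #include <stdio.h>

    #define X86_CR0_PG (1UL << 31)

    int main(void)
    {
            unsigned long cached_cr0 = 0x60000010UL;            /* paging off */
            unsigned long vmcb_cr0 = cached_cr0 | X86_CR0_PG;   /* guest set PG */

            /* (a ^ b) & mask is non-zero iff the masked bits differ */
            if ((cached_cr0 ^ vmcb_cr0) & X86_CR0_PG)
                    puts("CR0.PG changed -> reset and reload the MMU");
            return 0;
    }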
@@ -1331,7 +1383,8 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	}
 
 	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
-	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
+	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
+	    exit_code != SVM_EXIT_NPF)
 		printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
 		       "exit_code 0x%x\n",
 		       __FUNCTION__, svm->vmcb->control.exit_int_info,
@@ -1522,6 +1575,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	svm->host_dr6 = read_dr6();
 	svm->host_dr7 = read_dr7();
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
+	/* required for live migration with NPT */
+	if (npt_enabled)
+		svm->vmcb->save.cr3 = vcpu->arch.cr3;
 
 	if (svm->vmcb->save.dr7 & 0xff) {
 		write_dr7(0);
@@ -1665,6 +1721,12 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (npt_enabled) {
+		svm->vmcb->control.nested_cr3 = root;
+		force_new_asid(vcpu);
+		return;
+	}
+
 	svm->vmcb->save.cr3 = root;
 	force_new_asid(vcpu);
 