author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-06 16:21:18 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-06 16:21:18 -0400
commit    6de410c2b0cc055ae9ee640c84331f6a70878d9b
tree      49dfc7df2f1977c2d665c99266ded92afc98734b /drivers/kvm/svm.c
parent    c6799ade4ae04b53a5f677e5289116155ff01574
parent    2ff81f70b56dc1cdd3bf2f08414608069db6ef1a
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (66 commits)
KVM: Remove unused 'instruction_length'
KVM: Don't require explicit indication of completion of mmio or pio
KVM: Remove extraneous guest entry on mmio read
KVM: SVM: Only save/restore MSRs when needed
KVM: fix an if() condition
KVM: VMX: Add lazy FPU support for VT
KVM: VMX: Properly shadow the CR0 register in the vcpu struct
KVM: Don't complain about cpu erratum AA15
KVM: Lazy FPU support for SVM
KVM: Allow passing 64-bit values to the emulated read/write API
KVM: Per-vcpu statistics
KVM: VMX: Avoid unnecessary vcpu_load()/vcpu_put() cycles
KVM: MMU: Avoid heavy ASSERT at non debug mode.
KVM: VMX: Only save/restore MSR_K6_STAR if necessary
KVM: Fold drivers/kvm/kvm_vmx.h into drivers/kvm/vmx.c
KVM: VMX: Don't switch 64-bit msrs for 32-bit guests
KVM: VMX: Reduce unnecessary saving of host msrs
KVM: Handle guest page faults when emulating mmio
KVM: SVM: Report hardware exit reason to userspace instead of dmesg
KVM: Retry sleeping allocation if atomic allocation fails
...
Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r--  drivers/kvm/svm.c | 197
1 file changed, 125 insertions(+), 72 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 3d8ea7ac2ecc..9c15f32eea18 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -44,6 +44,10 @@ MODULE_LICENSE("GPL");
 #define KVM_EFER_LMA (1 << 10)
 #define KVM_EFER_LME (1 << 8)
 
+#define SVM_FEATURE_NPT (1 << 0)
+#define SVM_FEATURE_LBRV (1 << 1)
+#define SVM_FEATURE_SVML (1 << 2)
+
 unsigned long iopm_base;
 unsigned long msrpm_base;
 
@@ -59,15 +63,16 @@ struct kvm_ldttss_desc {
 struct svm_cpu_data {
        int cpu;
 
-       uint64_t asid_generation;
-       uint32_t max_asid;
-       uint32_t next_asid;
+       u64 asid_generation;
+       u32 max_asid;
+       u32 next_asid;
        struct kvm_ldttss_desc *tss_desc;
 
        struct page *save_area;
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
+static uint32_t svm_features;
 
 struct svm_init_data {
        int cpu;
@@ -82,6 +87,11 @@ static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 
 #define MAX_INST_SIZE 15
 
+static inline u32 svm_has(u32 feat)
+{
+       return svm_features & feat;
+}
+
 static unsigned get_addr_size(struct kvm_vcpu *vcpu)
 {
        struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
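svm_features is latched from CPUID leaf SVM_CPUID_FUNC (0x8000000a, EDX) in svm_hardware_enable() below, so svm_has() reduces to a plain mask test against the SVM_FEATURE_* bits defined above. A minimal usage sketch (these printks are illustrative, not part of this patch):

        if (svm_has(SVM_FEATURE_LBRV))
                printk(KERN_INFO "SVM: LBR virtualization supported\n");
        if (!svm_has(SVM_FEATURE_NPT))
                printk(KERN_INFO "SVM: no nested paging, using shadow page tables\n");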
@@ -203,13 +213,6 @@ static void inject_ud(struct kvm_vcpu *vcpu)
                UD_VECTOR;
 }
 
-static void inject_db(struct kvm_vcpu *vcpu)
-{
-       vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-                                            SVM_EVTINJ_TYPE_EXEPT |
-                                            DB_VECTOR;
-}
-
 static int is_page_fault(uint32_t info)
 {
        info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@ -309,6 +312,7 @@ static void svm_hardware_enable(void *garbage)
        svm_data->asid_generation = 1;
        svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        svm_data->next_asid = svm_data->max_asid + 1;
+       svm_features = cpuid_edx(SVM_CPUID_FUNC);
 
        asm volatile ( "sgdt %0" : "=m"(gdt_descr) );
        gdt = (struct desc_struct *)gdt_descr.address;
@@ -459,7 +463,6 @@ static void init_vmcb(struct vmcb *vmcb)
 {
        struct vmcb_control_area *control = &vmcb->control;
        struct vmcb_save_area *save = &vmcb->save;
-       u64 tsc;
 
        control->intercept_cr_read = INTERCEPT_CR0_MASK |
                                     INTERCEPT_CR3_MASK |
@@ -511,12 +514,13 @@ static void init_vmcb(struct vmcb *vmcb)
                                (1ULL << INTERCEPT_VMSAVE) |
                                (1ULL << INTERCEPT_STGI) |
                                (1ULL << INTERCEPT_CLGI) |
-                               (1ULL << INTERCEPT_SKINIT);
+                               (1ULL << INTERCEPT_SKINIT) |
+                               (1ULL << INTERCEPT_MONITOR) |
+                               (1ULL << INTERCEPT_MWAIT);
 
        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = msrpm_base;
-       rdtscll(tsc);
-       control->tsc_offset = -tsc;
+       control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;
 
        init_seg(&save->es);
@@ -576,12 +580,15 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        vcpu->svm->vmcb = page_address(page);
        memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
        vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
-       vcpu->svm->cr0 = 0x00000010;
        vcpu->svm->asid_generation = 0;
        memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
        init_vmcb(vcpu->svm->vmcb);
 
        fx_init(vcpu);
+       vcpu->fpu_active = 1;
+       vcpu->apic_base = 0xfee00000 |
+                       /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
+                       MSR_IA32_APICBASE_ENABLE;
 
        return 0;
 
@@ -602,11 +609,34 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 {
-       get_cpu();
+       int cpu, i;
+
+       cpu = get_cpu();
+       if (unlikely(cpu != vcpu->cpu)) {
+               u64 tsc_this, delta;
+
+               /*
+                * Make sure that the guest sees a monotonically
+                * increasing TSC.
+                */
+               rdtscll(tsc_this);
+               delta = vcpu->host_tsc - tsc_this;
+               vcpu->svm->vmcb->control.tsc_offset += delta;
+               vcpu->cpu = cpu;
+       }
+
+       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+               rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       int i;
+
+       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+               wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
+
+       rdtscll(vcpu->host_tsc);
        put_cpu();
 }
 
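This replaces the boot-time "-tsc" offset that init_vmcb() used to program: the guest observes host_tsc + tsc_offset, so when a vcpu migrates to a CPU whose TSC reads lower, the accumulated delta keeps that sum from moving backwards. A worked example with made-up numbers:

        /*
         * Worked example (hypothetical values).  The guest reads
         * host_tsc + tsc_offset, so carrying the old CPU's reading
         * forward keeps that sum monotonic:
         */
        u64 host_tsc_old = 1000000;  /* saved by svm_vcpu_put() on the old CPU */
        u64 tsc_this     =  400000;  /* the new CPU's TSC is behind            */
        u64 tsc_offset   =       0;

        tsc_offset += host_tsc_old - tsc_this;       /* += 600000 */
        /* guest now reads 400000 + 600000 == 1000000: no backwards jump.
         * The u64 wrap-around makes the same sum work out when the new
         * CPU's TSC is ahead instead. */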
@@ -714,7 +744,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
        vcpu->svm->vmcb->save.gdtr.base = dt->base ;
 }
 
-static void svm_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
+static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
@@ -733,9 +763,15 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        }
        }
 #endif
-       vcpu->svm->cr0 = cr0;
-       vcpu->svm->vmcb->save.cr0 = cr0 | CR0_PG_MASK | CR0_WP_MASK;
+       if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) {
+               vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+               vcpu->fpu_active = 1;
+       }
+
        vcpu->cr0 = cr0;
+       cr0 |= CR0_PG_MASK | CR0_WP_MASK;
+       cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
+       vcpu->svm->vmcb->save.cr0 = cr0;
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -785,18 +821,16 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 
 static void load_host_msrs(struct kvm_vcpu *vcpu)
 {
-       int i;
-
-       for ( i = 0; i < NR_HOST_SAVE_MSRS; i++)
-               wrmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
+#ifdef CONFIG_X86_64
+       wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+#endif
 }
 
 static void save_host_msrs(struct kvm_vcpu *vcpu)
 {
-       int i;
-
-       for ( i = 0; i < NR_HOST_SAVE_MSRS; i++)
-               rdmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
+#ifdef CONFIG_X86_64
+       rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+#endif
 }
 
 static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
@@ -890,7 +924,7 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        case EMULATE_DONE:
                return 1;
        case EMULATE_DO_MMIO:
-               ++kvm_stat.mmio_exits;
+               ++vcpu->stat.mmio_exits;
                kvm_run->exit_reason = KVM_EXIT_MMIO;
                return 0;
        case EMULATE_FAIL:
@@ -904,6 +938,16 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 0;
 }
 
+static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+       if (!(vcpu->cr0 & CR0_TS_MASK))
+               vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK;
+       vcpu->fpu_active = 1;
+
+       return 1;
+}
+
 static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        /*
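Together with the svm_set_cr0() and svm_set_cr3() changes, this is the lazy-FPU scheme: while fpu_active is 0, guest CR0.TS is forced on and #NM (device-not-available) is intercepted, so the fx_save()/fx_restore() pair in svm_vcpu_run() can be skipped entirely; the first guest FPU instruction faults into nm_interception(), which re-enables everything. A condensed sketch of the deactivation half, which the patch open-codes in svm_set_cr3() further down (the helper name here is hypothetical):

        /* Hypothetical helper: what svm_set_cr3() does to "park" the FPU. */
        static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
        {
                vcpu->svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
                vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK; /* FPU use -> #NM      */
                vcpu->fpu_active = 0;     /* svm_vcpu_run() skips fx_save/restore */
        }
        /* The first guest FPU instruction then takes the #NM vmexit into
         * nm_interception() above, which undoes all three steps. */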
@@ -981,7 +1025,7 @@ static int io_get_override(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, u64 *address)
+static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 {
        unsigned long addr_mask;
        unsigned long *reg;
@@ -1025,38 +1069,38 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug?
-       int _in = io_info & SVM_IOIO_TYPE_MASK;
+       int size, down, in, string, rep;
+       unsigned port;
+       unsigned long count;
+       gva_t address = 0;
 
-       ++kvm_stat.io_exits;
+       ++vcpu->stat.io_exits;
 
        vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;
 
-       kvm_run->exit_reason = KVM_EXIT_IO;
-       kvm_run->io.port = io_info >> 16;
-       kvm_run->io.direction = (_in) ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
-       kvm_run->io.size = ((io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT);
-       kvm_run->io.string = (io_info & SVM_IOIO_STR_MASK) != 0;
-       kvm_run->io.rep = (io_info & SVM_IOIO_REP_MASK) != 0;
+       in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
+       port = io_info >> 16;
+       size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
+       string = (io_info & SVM_IOIO_STR_MASK) != 0;
+       rep = (io_info & SVM_IOIO_REP_MASK) != 0;
+       count = 1;
+       down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
 
-       if (kvm_run->io.string) {
+       if (string) {
                unsigned addr_mask;
 
-               addr_mask = io_adress(vcpu, _in, &kvm_run->io.address);
+               addr_mask = io_adress(vcpu, in, &address);
                if (!addr_mask) {
                        printk(KERN_DEBUG "%s: get io address failed\n",
                               __FUNCTION__);
                        return 1;
                }
 
-               if (kvm_run->io.rep) {
-                       kvm_run->io.count
-                               = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
-                       kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
-                                                  & X86_EFLAGS_DF) != 0;
-               }
-       } else
-               kvm_run->io.value = vcpu->svm->vmcb->save.rax;
-       return 0;
+               if (rep)
+                       count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
+       }
+       return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
+                            address, rep, port);
 }
 
 static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
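For reference, exit_info_1 on an IOIO intercept packs direction, operand size, string/rep flags and the port number; the code above unpacks it with the SVM_IOIO_* masks from svm.h and hands everything to the new common kvm_setup_pio() helper instead of filling kvm_run->io directly. A worked decode with illustrative values:

        /*
         * Illustrative decode: for a guest "rep outsw" to port 0x3f8
         * (hypothetical instruction), the fields above come out as:
         *
         *   in = 0         TYPE bit clear, so it is an OUT
         *   port = 0x3f8   io_info >> 16
         *   size = 2       16-bit operand
         *   string = 1     OUTS reads its data from guest memory
         *   rep = 1        count = rCX masked to the address size
         *   down = 0       EFLAGS.DF clear, addresses increment
         */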
@@ -1072,13 +1116,14 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return 1;
 
        kvm_run->exit_reason = KVM_EXIT_HLT;
-       ++kvm_stat.halt_exits;
+       ++vcpu->stat.halt_exits;
        return 0;
 }
 
 static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-       vcpu->svm->vmcb->save.rip += 3;
+       vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
+       skip_emulated_instruction(vcpu);
        return kvm_hypercall(vcpu, kvm_run);
 }
 
@@ -1098,8 +1143,8 @@ static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_r
 static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
-       kvm_run->exit_reason = KVM_EXIT_CPUID;
-       return 0;
+       kvm_emulate_cpuid(vcpu);
+       return 1;
 }
 
 static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1239,7 +1284,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu,
         */
        if (kvm_run->request_interrupt_window &&
            !vcpu->irq_summary) {
-               ++kvm_stat.irq_window_exits;
+               ++vcpu->stat.irq_window_exits;
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
        }
@@ -1267,6 +1312,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
        [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
        [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
+       [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
        [SVM_EXIT_INTR] = nop_on_interception,
        [SVM_EXIT_NMI] = nop_on_interception,
        [SVM_EXIT_SMI] = nop_on_interception,
@@ -1288,6 +1334,8 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
        [SVM_EXIT_STGI] = invalid_op_interception,
        [SVM_EXIT_CLGI] = invalid_op_interception,
        [SVM_EXIT_SKINIT] = invalid_op_interception,
+       [SVM_EXIT_MONITOR] = invalid_op_interception,
+       [SVM_EXIT_MWAIT] = invalid_op_interception,
 };
 
 
@@ -1295,8 +1343,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        u32 exit_code = vcpu->svm->vmcb->control.exit_code;
 
-       kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
-
        if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
            exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
                printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
@@ -1307,12 +1353,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
            || svm_exit_handlers[exit_code] == 0) {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-               printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n",
-                      __FUNCTION__,
-                      exit_code,
-                      vcpu->svm->vmcb->save.rip,
-                      vcpu->cr0,
-                      vcpu->svm->vmcb->save.rflags);
+               kvm_run->hw.hardware_exit_reason = exit_code;
                return 0;
        }
 
@@ -1461,8 +1502,10 @@ again:
                load_db_regs(vcpu->svm->db_regs);
        }
 
-       fx_save(vcpu->host_fx_image);
-       fx_restore(vcpu->guest_fx_image);
+       if (vcpu->fpu_active) {
+               fx_save(vcpu->host_fx_image);
+               fx_restore(vcpu->guest_fx_image);
+       }
 
        asm volatile (
 #ifdef CONFIG_X86_64
@@ -1573,8 +1616,10 @@ again:
 #endif
                : "cc", "memory" );
 
-       fx_save(vcpu->guest_fx_image);
-       fx_restore(vcpu->host_fx_image);
+       if (vcpu->fpu_active) {
+               fx_save(vcpu->guest_fx_image);
+               fx_restore(vcpu->host_fx_image);
+       }
 
        if ((vcpu->svm->vmcb->save.dr7 & 0xff))
                load_db_regs(vcpu->svm->host_db_regs);
@@ -1606,8 +1651,9 @@ again:
        vcpu->svm->next_rip = 0;
 
        if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
-               kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
-               kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
+               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+               kvm_run->fail_entry.hardware_entry_failure_reason
+                       = vcpu->svm->vmcb->control.exit_code;
                post_kvm_run_save(vcpu, kvm_run);
                return 0;
        }
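With exit_type gone, hardware entry failures now travel through the regular exit_reason field of the shared kvm_run structure. A sketch of how a userspace VMM sitting on /dev/kvm would pick this up after KVM_RUN (error handling and setup elided):

        struct kvm_run *run;    /* the mmap()ed vcpu region */

        ioctl(vcpu_fd, KVM_RUN, 0);
        if (run->exit_reason == KVM_EXIT_FAIL_ENTRY)
                fprintf(stderr, "vmentry failed, hardware reason %llx\n",
                        (unsigned long long)
                        run->fail_entry.hardware_entry_failure_reason);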
@@ -1615,14 +1661,16 @@ again:
        r = handle_exit(vcpu, kvm_run);
        if (r > 0) {
                if (signal_pending(current)) {
-                       ++kvm_stat.signal_exits;
+                       ++vcpu->stat.signal_exits;
                        post_kvm_run_save(vcpu, kvm_run);
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
                        return -EINTR;
                }
 
                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-                       ++kvm_stat.request_irq_exits;
+                       ++vcpu->stat.request_irq_exits;
                        post_kvm_run_save(vcpu, kvm_run);
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
                        return -EINTR;
                }
                kvm_resched(vcpu);
@@ -1641,6 +1689,12 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
        vcpu->svm->vmcb->save.cr3 = root;
        force_new_asid(vcpu);
+
+       if (vcpu->fpu_active) {
+               vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+               vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK;
+               vcpu->fpu_active = 0;
+       }
 }
 
 static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
@@ -1649,7 +1703,7 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
 {
        uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
 
-       ++kvm_stat.pf_guest;
+       ++vcpu->stat.pf_guest;
 
        if (is_page_fault(exit_int_info)) {
 
@@ -1709,9 +1763,8 @@ static struct kvm_arch_ops svm_arch_ops = {
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cs_db_l_bits = svm_get_cs_db_l_bits,
-       .decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits,
+       .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
-       .set_cr0_no_modeswitch = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
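kvm_arch_ops is the indirection the generic kvm module uses to stay vendor-neutral between SVM and VMX; renaming or dropping a hook here only works because vmx.c and the common code were updated in the same series. Call sites in the generic code dispatch through the table, roughly:

        /* In drivers/kvm/kvm_main.c, calls look like: */
        kvm_arch_ops->set_cr0(vcpu, cr0);
        kvm_arch_ops->decache_cr4_guest_bits(vcpu);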
