Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--	arch/x86/kvm/svm.c	147
1 file changed, 125 insertions, 22 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ce438e0fdd26..bc5b9b8d4a33 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4,6 +4,7 @@
  * AMD SVM support
  *
  * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright 2010 Red Hat, Inc. and/or its affilates.
  *
  * Authors:
  *   Yaniv Kamay  <yaniv@qumranet.com>
@@ -130,7 +131,7 @@ static struct svm_direct_access_msrs {
 	u32 index;   /* Index of the MSR */
 	bool always; /* True if intercept is always on */
 } direct_access_msrs[] = {
-	{ .index = MSR_K6_STAR,			.always = true },
+	{ .index = MSR_STAR,			.always = true },
 	{ .index = MSR_IA32_SYSENTER_CS,	.always = true },
 #ifdef CONFIG_X86_64
 	{ .index = MSR_GS_BASE,			.always = true },
@@ -285,11 +286,11 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
 
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
+	vcpu->arch.efer = efer;
 	if (!npt_enabled && !(efer & EFER_LMA))
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
-	vcpu->arch.efer = efer;
 }
 
 static int is_external_interrupt(u32 info)
@@ -383,8 +384,7 @@ static void svm_init_erratum_383(void)
 	int err;
 	u64 val;
 
-	/* Only Fam10h is affected */
-	if (boot_cpu_data.x86 != 0x10)
+	if (!cpu_has_amd_erratum(amd_erratum_383))
 		return;
 
 	/* Use _safe variants to not break nested virtualization */
@@ -640,7 +640,7 @@ static __init int svm_hardware_setup(void)
 
 	if (nested) {
 		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
-		kvm_enable_efer_bits(EFER_SVME);
+		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
 	}
 
 	for_each_possible_cpu(cpu) {
@@ -806,7 +806,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
 	 */
 	svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-	kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
+	(void)kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
 
 	save->cr4 = X86_CR4_PAE;
 	/* rdx = ?? */
@@ -903,13 +903,18 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->asid_generation = 0;
 	init_vmcb(svm);
 
-	fx_init(&svm->vcpu);
+	err = fx_init(&svm->vcpu);
+	if (err)
+		goto free_page4;
+
 	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 
 	return &svm->vcpu;
 
+free_page4:
+	__free_page(hsave_page);
 free_page3:
 	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
 free_page2:
@@ -1488,7 +1493,7 @@ static void svm_handle_mce(struct vcpu_svm *svm)
 	 */
 	pr_err("KVM: Guest triggered AMD Erratum 383\n");
 
-	set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
+	kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
 
 	return;
 }
@@ -1535,7 +1540,7 @@ static int io_interception(struct vcpu_svm *svm)
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	if (string || in)
-		return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
+		return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
 
 	port = io_info >> 16;
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -1957,7 +1962,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 		svm->vmcb->save.cr3 = hsave->save.cr3;
 		svm->vcpu.arch.cr3 = hsave->save.cr3;
 	} else {
-		kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
+		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
 	}
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
@@ -2080,7 +2085,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
 		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
 	} else
-		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
 
 	/* Guest paging mode is active - reset mmu */
 	kvm_mmu_reset_context(&svm->vcpu);
@@ -2386,16 +2391,12 @@ static int iret_interception(struct vcpu_svm *svm)
 
 static int invlpg_interception(struct vcpu_svm *svm)
 {
-	if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
-		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
-	return 1;
+	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-	if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
-		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
-	return 1;
+	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
@@ -2431,7 +2432,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 		*data = tsc_offset + native_read_tsc();
 		break;
 	}
-	case MSR_K6_STAR:
+	case MSR_STAR:
 		*data = svm->vmcb->save.star;
 		break;
 #ifdef CONFIG_X86_64
@@ -2555,7 +2556,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 		break;
 	}
-	case MSR_K6_STAR:
+	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;
 #ifdef CONFIG_X86_64
@@ -2726,6 +2727,99 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_NPF]				= pf_interception,
 };
 
+void dump_vmcb(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_control_area *control = &svm->vmcb->control;
+	struct vmcb_save_area *save = &svm->vmcb->save;
+
+	pr_err("VMCB Control Area:\n");
+	pr_err("cr_read: %04x\n", control->intercept_cr_read);
+	pr_err("cr_write: %04x\n", control->intercept_cr_write);
+	pr_err("dr_read: %04x\n", control->intercept_dr_read);
+	pr_err("dr_write: %04x\n", control->intercept_dr_write);
+	pr_err("exceptions: %08x\n", control->intercept_exceptions);
+	pr_err("intercepts: %016llx\n", control->intercept);
+	pr_err("pause filter count: %d\n", control->pause_filter_count);
+	pr_err("iopm_base_pa: %016llx\n", control->iopm_base_pa);
+	pr_err("msrpm_base_pa: %016llx\n", control->msrpm_base_pa);
+	pr_err("tsc_offset: %016llx\n", control->tsc_offset);
+	pr_err("asid: %d\n", control->asid);
+	pr_err("tlb_ctl: %d\n", control->tlb_ctl);
+	pr_err("int_ctl: %08x\n", control->int_ctl);
+	pr_err("int_vector: %08x\n", control->int_vector);
+	pr_err("int_state: %08x\n", control->int_state);
+	pr_err("exit_code: %08x\n", control->exit_code);
+	pr_err("exit_info1: %016llx\n", control->exit_info_1);
+	pr_err("exit_info2: %016llx\n", control->exit_info_2);
+	pr_err("exit_int_info: %08x\n", control->exit_int_info);
+	pr_err("exit_int_info_err: %08x\n", control->exit_int_info_err);
+	pr_err("nested_ctl: %lld\n", control->nested_ctl);
+	pr_err("nested_cr3: %016llx\n", control->nested_cr3);
+	pr_err("event_inj: %08x\n", control->event_inj);
+	pr_err("event_inj_err: %08x\n", control->event_inj_err);
+	pr_err("lbr_ctl: %lld\n", control->lbr_ctl);
+	pr_err("next_rip: %016llx\n", control->next_rip);
+	pr_err("VMCB State Save Area:\n");
+	pr_err("es: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->es.selector, save->es.attrib,
+		save->es.limit, save->es.base);
+	pr_err("cs: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->cs.selector, save->cs.attrib,
+		save->cs.limit, save->cs.base);
+	pr_err("ss: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->ss.selector, save->ss.attrib,
+		save->ss.limit, save->ss.base);
+	pr_err("ds: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->ds.selector, save->ds.attrib,
+		save->ds.limit, save->ds.base);
+	pr_err("fs: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->fs.selector, save->fs.attrib,
+		save->fs.limit, save->fs.base);
+	pr_err("gs: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->gs.selector, save->gs.attrib,
+		save->gs.limit, save->gs.base);
+	pr_err("gdtr: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->gdtr.selector, save->gdtr.attrib,
+		save->gdtr.limit, save->gdtr.base);
+	pr_err("ldtr: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->ldtr.selector, save->ldtr.attrib,
+		save->ldtr.limit, save->ldtr.base);
+	pr_err("idtr: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->idtr.selector, save->idtr.attrib,
+		save->idtr.limit, save->idtr.base);
+	pr_err("tr: s: %04x a: %04x l: %08x b: %016llx\n",
+		save->tr.selector, save->tr.attrib,
+		save->tr.limit, save->tr.base);
+	pr_err("cpl: %d efer: %016llx\n",
+		save->cpl, save->efer);
+	pr_err("cr0: %016llx cr2: %016llx\n",
+		save->cr0, save->cr2);
+	pr_err("cr3: %016llx cr4: %016llx\n",
+		save->cr3, save->cr4);
+	pr_err("dr6: %016llx dr7: %016llx\n",
+		save->dr6, save->dr7);
+	pr_err("rip: %016llx rflags: %016llx\n",
+		save->rip, save->rflags);
+	pr_err("rsp: %016llx rax: %016llx\n",
+		save->rsp, save->rax);
+	pr_err("star: %016llx lstar: %016llx\n",
+		save->star, save->lstar);
+	pr_err("cstar: %016llx sfmask: %016llx\n",
+		save->cstar, save->sfmask);
+	pr_err("kernel_gs_base: %016llx sysenter_cs: %016llx\n",
+		save->kernel_gs_base, save->sysenter_cs);
+	pr_err("sysenter_esp: %016llx sysenter_eip: %016llx\n",
+		save->sysenter_esp, save->sysenter_eip);
+	pr_err("gpat: %016llx dbgctl: %016llx\n",
+		save->g_pat, save->dbgctl);
+	pr_err("br_from: %016llx br_to: %016llx\n",
+		save->br_from, save->br_to);
+	pr_err("excp_from: %016llx excp_to: %016llx\n",
+		save->last_excp_from, save->last_excp_to);
+
+}
+
 static int handle_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -2770,6 +2864,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		kvm_run->fail_entry.hardware_entry_failure_reason
 			= svm->vmcb->control.exit_code;
+		pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
+		dump_vmcb(vcpu);
 		return 0;
 	}
 
@@ -2826,9 +2922,6 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
 	struct vmcb_control_area *control;
 
-	trace_kvm_inj_virq(irq);
-
-	++svm->vcpu.stat.irq_injections;
 	control = &svm->vmcb->control;
 	control->int_vector = irq;
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
@@ -2842,6 +2935,9 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
 
 	BUG_ON(!(gif_set(svm)));
 
+	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
+	++vcpu->stat.irq_injections;
+
 	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
 		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
@@ -3327,6 +3423,11 @@ static bool svm_rdtscp_supported(void)
 	return false;
 }
 
+static bool svm_has_wbinvd_exit(void)
+{
+	return true;
+}
+
 static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3411,6 +3512,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.rdtscp_supported = svm_rdtscp_supported,
 
 	.set_supported_cpuid = svm_set_supported_cpuid,
+
+	.has_wbinvd_exit = svm_has_wbinvd_exit,
 };
 
 static int __init svm_init(void)