about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorSheng Yang <sheng.yang@intel.com>2008-05-15 06:23:25 -0400
committerAvi Kivity <avi@qumranet.com>2008-07-20 05:42:26 -0400
commitf08864b42a45581a64558aa5b6b673c77b97ee5d (patch)
treed104bf34c951beffebb7c2402329f236a7bad7ad /arch
parent3419ffc8e45a5344abc87684cbca6cdc5c9c8a01 (diff)
KVM: VMX: Enable NMI with in-kernel irqchip
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kvm/vmx.c124
-rw-r--r--arch/x86/kvm/vmx.h12
-rw-r--r--arch/x86/kvm/x86.c1
3 files changed, 118 insertions, 19 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b99bb37e5dec..1bb994657208 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -264,6 +264,11 @@ static inline int cpu_has_vmx_vpid(void)
264 SECONDARY_EXEC_ENABLE_VPID); 264 SECONDARY_EXEC_ENABLE_VPID);
265} 265}
266 266
267static inline int cpu_has_virtual_nmis(void)
268{
269 return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
270}
271
267static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) 272static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
268{ 273{
269 int i; 274 int i;
@@ -1088,7 +1093,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1088 u32 _vmentry_control = 0; 1093 u32 _vmentry_control = 0;
1089 1094
1090 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; 1095 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
1091 opt = 0; 1096 opt = PIN_BASED_VIRTUAL_NMIS;
1092 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, 1097 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
1093 &_pin_based_exec_control) < 0) 1098 &_pin_based_exec_control) < 0)
1094 return -EIO; 1099 return -EIO;
@@ -2130,6 +2135,13 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
2130 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); 2135 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
2131} 2136}
2132 2137
2138static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2139{
2140 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2141 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
2142 vcpu->arch.nmi_pending = 0;
2143}
2144
2133static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) 2145static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
2134{ 2146{
2135 int word_index = __ffs(vcpu->arch.irq_summary); 2147 int word_index = __ffs(vcpu->arch.irq_summary);
@@ -2653,6 +2665,19 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2653 return 1; 2665 return 1;
2654} 2666}
2655 2667
2668static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2669{
2670 u32 cpu_based_vm_exec_control;
2671
2672 /* clear pending NMI */
2673 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2674 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
2675 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2676 ++vcpu->stat.nmi_window_exits;
2677
2678 return 1;
2679}
2680
2656/* 2681/*
2657 * The exit handlers return 1 if the exit was handled fully and guest execution 2682 * The exit handlers return 1 if the exit was handled fully and guest execution
2658 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 2683 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -2663,6 +2688,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
2663 [EXIT_REASON_EXCEPTION_NMI] = handle_exception, 2688 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
2664 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, 2689 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
2665 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, 2690 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
2691 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
2666 [EXIT_REASON_IO_INSTRUCTION] = handle_io, 2692 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
2667 [EXIT_REASON_CR_ACCESS] = handle_cr, 2693 [EXIT_REASON_CR_ACCESS] = handle_cr,
2668 [EXIT_REASON_DR_ACCESS] = handle_dr, 2694 [EXIT_REASON_DR_ACCESS] = handle_dr,
@@ -2750,17 +2776,52 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
2750 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); 2776 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2751} 2777}
2752 2778
2779static void enable_nmi_window(struct kvm_vcpu *vcpu)
2780{
2781 u32 cpu_based_vm_exec_control;
2782
2783 if (!cpu_has_virtual_nmis())
2784 return;
2785
2786 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2787 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
2788 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2789}
2790
2791static int vmx_nmi_enabled(struct kvm_vcpu *vcpu)
2792{
2793 u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2794 return !(guest_intr & (GUEST_INTR_STATE_NMI |
2795 GUEST_INTR_STATE_MOV_SS |
2796 GUEST_INTR_STATE_STI));
2797}
2798
2799static int vmx_irq_enabled(struct kvm_vcpu *vcpu)
2800{
2801 u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2802 return (!(guest_intr & (GUEST_INTR_STATE_MOV_SS |
2803 GUEST_INTR_STATE_STI)) &&
2804 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
2805}
2806
2807static void enable_intr_window(struct kvm_vcpu *vcpu)
2808{
2809 if (vcpu->arch.nmi_pending)
2810 enable_nmi_window(vcpu);
2811 else if (kvm_cpu_has_interrupt(vcpu))
2812 enable_irq_window(vcpu);
2813}
2814
2753static void vmx_intr_assist(struct kvm_vcpu *vcpu) 2815static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2754{ 2816{
2755 struct vcpu_vmx *vmx = to_vmx(vcpu); 2817 struct vcpu_vmx *vmx = to_vmx(vcpu);
2756 u32 idtv_info_field, intr_info_field; 2818 u32 idtv_info_field, intr_info_field, exit_intr_info_field;
2757 int has_ext_irq, interrupt_window_open;
2758 int vector; 2819 int vector;
2759 2820
2760 update_tpr_threshold(vcpu); 2821 update_tpr_threshold(vcpu);
2761 2822
2762 has_ext_irq = kvm_cpu_has_interrupt(vcpu);
2763 intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD); 2823 intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
2824 exit_intr_info_field = vmcs_read32(VM_EXIT_INTR_INFO);
2764 idtv_info_field = vmx->idt_vectoring_info; 2825 idtv_info_field = vmx->idt_vectoring_info;
2765 if (intr_info_field & INTR_INFO_VALID_MASK) { 2826 if (intr_info_field & INTR_INFO_VALID_MASK) {
2766 if (idtv_info_field & INTR_INFO_VALID_MASK) { 2827 if (idtv_info_field & INTR_INFO_VALID_MASK) {
@@ -2768,8 +2829,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2768 if (printk_ratelimit()) 2829 if (printk_ratelimit())
2769 printk(KERN_ERR "Fault when IDT_Vectoring\n"); 2830 printk(KERN_ERR "Fault when IDT_Vectoring\n");
2770 } 2831 }
2771 if (has_ext_irq) 2832 enable_intr_window(vcpu);
2772 enable_irq_window(vcpu);
2773 return; 2833 return;
2774 } 2834 }
2775 if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) { 2835 if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
@@ -2779,30 +2839,56 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2779 u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK; 2839 u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
2780 2840
2781 vmx_inject_irq(vcpu, vect); 2841 vmx_inject_irq(vcpu, vect);
2782 if (unlikely(has_ext_irq)) 2842 enable_intr_window(vcpu);
2783 enable_irq_window(vcpu);
2784 return; 2843 return;
2785 } 2844 }
2786 2845
2787 KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler); 2846 KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler);
2788 2847
2789 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); 2848 /*
2849 * SDM 3: 25.7.1.2
2850 * Clear bit "block by NMI" before VM entry if a NMI delivery
2851 * faulted.
2852 */
2853 if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
2854 == INTR_TYPE_NMI_INTR && cpu_has_virtual_nmis())
2855 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2856 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2857 ~GUEST_INTR_STATE_NMI);
2858
2859 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field
2860 & ~INTR_INFO_RESVD_BITS_MASK);
2790 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2861 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2791 vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); 2862 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
2792 2863
2793 if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK)) 2864 if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK))
2794 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2865 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2795 vmcs_read32(IDT_VECTORING_ERROR_CODE)); 2866 vmcs_read32(IDT_VECTORING_ERROR_CODE));
2796 if (unlikely(has_ext_irq)) 2867 enable_intr_window(vcpu);
2797 enable_irq_window(vcpu);
2798 return; 2868 return;
2799 } 2869 }
2800 if (!has_ext_irq) 2870 if (cpu_has_virtual_nmis()) {
2871 /*
2872 * SDM 3: 25.7.1.2
2873 * Re-set bit "block by NMI" before VM entry if vmexit caused by
2874 * a guest IRET fault.
2875 */
2876 if ((exit_intr_info_field & INTR_INFO_UNBLOCK_NMI) &&
2877 (exit_intr_info_field & INTR_INFO_VECTOR_MASK) != 8)
2878 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2879 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) |
2880 GUEST_INTR_STATE_NMI);
2881 else if (vcpu->arch.nmi_pending) {
2882 if (vmx_nmi_enabled(vcpu))
2883 vmx_inject_nmi(vcpu);
2884 enable_intr_window(vcpu);
2885 return;
2886 }
2887
2888 }
2889 if (!kvm_cpu_has_interrupt(vcpu))
2801 return; 2890 return;
2802 interrupt_window_open = 2891 if (vmx_irq_enabled(vcpu)) {
2803 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2804 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
2805 if (interrupt_window_open) {
2806 vector = kvm_cpu_get_interrupt(vcpu); 2892 vector = kvm_cpu_get_interrupt(vcpu);
2807 vmx_inject_irq(vcpu, vector); 2893 vmx_inject_irq(vcpu, vector);
2808 kvm_timer_intr_post(vcpu, vector); 2894 kvm_timer_intr_post(vcpu, vector);
@@ -2963,7 +3049,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2963 fixup_rmode_irq(vmx); 3049 fixup_rmode_irq(vmx);
2964 3050
2965 vcpu->arch.interrupt_window_open = 3051 vcpu->arch.interrupt_window_open =
2966 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; 3052 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
3053 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)) == 0;
2967 3054
2968 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 3055 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
2969 vmx->launched = 1; 3056 vmx->launched = 1;
@@ -2971,7 +3058,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2971 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 3058 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2972 3059
2973 /* We need to handle NMIs before interrupts are enabled */ 3060 /* We need to handle NMIs before interrupts are enabled */
2974 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */ 3061 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200 &&
3062 (intr_info & INTR_INFO_VALID_MASK)) {
2975 KVMTRACE_0D(NMI, vcpu, handler); 3063 KVMTRACE_0D(NMI, vcpu, handler);
2976 asm("int $2"); 3064 asm("int $2");
2977 } 3065 }
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 79d94c610dfe..425a13436b3f 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -40,6 +40,7 @@
40#define CPU_BASED_CR8_LOAD_EXITING 0x00080000 40#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
41#define CPU_BASED_CR8_STORE_EXITING 0x00100000 41#define CPU_BASED_CR8_STORE_EXITING 0x00100000
42#define CPU_BASED_TPR_SHADOW 0x00200000 42#define CPU_BASED_TPR_SHADOW 0x00200000
43#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
43#define CPU_BASED_MOV_DR_EXITING 0x00800000 44#define CPU_BASED_MOV_DR_EXITING 0x00800000
44#define CPU_BASED_UNCOND_IO_EXITING 0x01000000 45#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
45#define CPU_BASED_USE_IO_BITMAPS 0x02000000 46#define CPU_BASED_USE_IO_BITMAPS 0x02000000
@@ -216,7 +217,7 @@ enum vmcs_field {
216#define EXIT_REASON_TRIPLE_FAULT 2 217#define EXIT_REASON_TRIPLE_FAULT 2
217 218
218#define EXIT_REASON_PENDING_INTERRUPT 7 219#define EXIT_REASON_PENDING_INTERRUPT 7
219 220#define EXIT_REASON_NMI_WINDOW 8
220#define EXIT_REASON_TASK_SWITCH 9 221#define EXIT_REASON_TASK_SWITCH 9
221#define EXIT_REASON_CPUID 10 222#define EXIT_REASON_CPUID 10
222#define EXIT_REASON_HLT 12 223#define EXIT_REASON_HLT 12
@@ -251,7 +252,9 @@ enum vmcs_field {
251#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ 252#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
252#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ 253#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
253#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ 254#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
255#define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */
254#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ 256#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
257#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000
255 258
256#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK 259#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK
257#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK 260#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK
@@ -259,9 +262,16 @@ enum vmcs_field {
259#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK 262#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK
260 263
261#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ 264#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
265#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
262#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ 266#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */
263#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ 267#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
264 268
269/* GUEST_INTERRUPTIBILITY_INFO flags. */
270#define GUEST_INTR_STATE_STI 0x00000001
271#define GUEST_INTR_STATE_MOV_SS 0x00000002
272#define GUEST_INTR_STATE_SMI 0x00000004
273#define GUEST_INTR_STATE_NMI 0x00000008
274
265/* 275/*
266 * Exit Qualifications for MOV for Control Register Access 276 * Exit Qualifications for MOV for Control Register Access
267 */ 277 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 19974dde6567..05b54976c891 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -72,6 +72,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
72 { "mmio_exits", VCPU_STAT(mmio_exits) }, 72 { "mmio_exits", VCPU_STAT(mmio_exits) },
73 { "signal_exits", VCPU_STAT(signal_exits) }, 73 { "signal_exits", VCPU_STAT(signal_exits) },
74 { "irq_window", VCPU_STAT(irq_window_exits) }, 74 { "irq_window", VCPU_STAT(irq_window_exits) },
75 { "nmi_window", VCPU_STAT(nmi_window_exits) },
75 { "halt_exits", VCPU_STAT(halt_exits) }, 76 { "halt_exits", VCPU_STAT(halt_exits) },
76 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 77 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
77 { "hypercalls", VCPU_STAT(hypercalls) }, 78 { "hypercalls", VCPU_STAT(hypercalls) },