author	Jan Kiszka <jan.kiszka@siemens.com>	2008-09-26 03:30:55 -0400
committer	Avi Kivity <avi@redhat.com>	2008-12-31 09:51:42 -0500
commit	c4abb7c9cde24b7351a47328ef866e6a2bbb1ad0 (patch)
tree	85cef82232898397e645e53334655712af54085f /arch/x86/kvm/x86.c
parent	26df99c6c5807115f06d4e1abae397b7f5f3e00c (diff)
KVM: x86: Support for user space injected NMIs
Introduces the KVM_NMI IOCTL to the generic x86 part of KVM for
injecting NMIs from user space and also extends the statistic
report accordingly.

Based on the original patch by Sheng Yang.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	46
1 file changed, 44 insertions, 2 deletions
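
For context, KVM_NMI is a per-vCPU ioctl that takes no argument: user space
issues it on a vCPU file descriptor and the kernel queues an NMI for that
vCPU (see kvm_vcpu_ioctl_nmi() in the diff below). A minimal sketch of the
caller side, assuming vcpu_fd was obtained via KVM_CREATE_VCPU; the helper
name and error handling are illustrative, not part of this patch:

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	/* Queue an NMI on a vCPU from user space. vcpu_fd is assumed to
	 * be a vCPU file descriptor returned by KVM_CREATE_VCPU. */
	static int inject_nmi(int vcpu_fd)
	{
		if (ioctl(vcpu_fd, KVM_NMI) < 0) {
			perror("KVM_NMI");
			return -1;
		}
		return 0;
	}
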
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1fa9a6db633..07971451b94 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
86 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 86 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
87 { "hypercalls", VCPU_STAT(hypercalls) }, 87 { "hypercalls", VCPU_STAT(hypercalls) },
88 { "request_irq", VCPU_STAT(request_irq_exits) }, 88 { "request_irq", VCPU_STAT(request_irq_exits) },
89 { "request_nmi", VCPU_STAT(request_nmi_exits) },
89 { "irq_exits", VCPU_STAT(irq_exits) }, 90 { "irq_exits", VCPU_STAT(irq_exits) },
90 { "host_state_reload", VCPU_STAT(host_state_reload) }, 91 { "host_state_reload", VCPU_STAT(host_state_reload) },
91 { "efer_reload", VCPU_STAT(efer_reload) }, 92 { "efer_reload", VCPU_STAT(efer_reload) },
@@ -93,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
93 { "insn_emulation", VCPU_STAT(insn_emulation) }, 94 { "insn_emulation", VCPU_STAT(insn_emulation) },
94 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, 95 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
95 { "irq_injections", VCPU_STAT(irq_injections) }, 96 { "irq_injections", VCPU_STAT(irq_injections) },
97 { "nmi_injections", VCPU_STAT(nmi_injections) },
96 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, 98 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
97 { "mmu_pte_write", VM_STAT(mmu_pte_write) }, 99 { "mmu_pte_write", VM_STAT(mmu_pte_write) },
98 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, 100 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
@@ -1318,6 +1320,15 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
+{
+	vcpu_load(vcpu);
+	kvm_inject_nmi(vcpu);
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
 					   struct kvm_tpr_access_ctl *tac)
 {
@@ -1377,6 +1388,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = 0;
 		break;
 	}
+	case KVM_NMI: {
+		r = kvm_vcpu_ioctl_nmi(vcpu);
+		if (r)
+			goto out;
+		r = 0;
+		break;
+	}
 	case KVM_SET_CPUID: {
 		struct kvm_cpuid __user *cpuid_arg = argp;
 		struct kvm_cpuid cpuid;
@@ -2812,18 +2830,37 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
 		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
 }
 
+/*
+ * Check if userspace requested an NMI window, and that the NMI window
+ * is open.
+ *
+ * No need to exit to userspace if we already have an NMI queued.
+ */
+static int dm_request_for_nmi_injection(struct kvm_vcpu *vcpu,
+					struct kvm_run *kvm_run)
+{
+	return (!vcpu->arch.nmi_pending &&
+		kvm_run->request_nmi_window &&
+		vcpu->arch.nmi_window_open);
+}
+
 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
 			      struct kvm_run *kvm_run)
 {
 	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
-	if (irqchip_in_kernel(vcpu->kvm))
+	if (irqchip_in_kernel(vcpu->kvm)) {
 		kvm_run->ready_for_interrupt_injection = 1;
-	else
+		kvm_run->ready_for_nmi_injection = 1;
+	} else {
 		kvm_run->ready_for_interrupt_injection =
 			(vcpu->arch.interrupt_window_open &&
 			 vcpu->arch.irq_summary == 0);
+		kvm_run->ready_for_nmi_injection =
+			(vcpu->arch.nmi_window_open &&
+			 vcpu->arch.nmi_pending == 0);
+	}
 }
 
 static void vapic_enter(struct kvm_vcpu *vcpu)
@@ -2999,6 +3036,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	if (r > 0) {
+		if (dm_request_for_nmi_injection(vcpu, kvm_run)) {
+			r = -EINTR;
+			kvm_run->exit_reason = KVM_EXIT_NMI;
+			++vcpu->stat.request_nmi_exits;
+		}
 		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
 			r = -EINTR;
 			kvm_run->exit_reason = KVM_EXIT_INTR;
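
The other half of this protocol lives in user space: the VMM sets
kvm_run->request_nmi_window when it has an NMI to deliver, and when KVM_RUN
returns with exit_reason == KVM_EXIT_NMI and ready_for_nmi_injection set, it
injects via the KVM_NMI ioctl. A sketch of such a run loop, assuming a
mmap'ed struct kvm_run; run_vcpu() and nmi_is_pending() are illustrative
names, not part of this patch:

	#include <errno.h>
	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Stub standing in for VMM-specific device emulation deciding
	 * that an NMI should be delivered to this vCPU. */
	static int nmi_is_pending(void)
	{
		return 0;
	}

	/* run points at the mmap'ed struct kvm_run for this vCPU. */
	static void run_vcpu(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			run->request_nmi_window = nmi_is_pending();
			if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
				break;	/* real error; EINTR covers the
					 * window exits requested above */
			if (run->exit_reason == KVM_EXIT_NMI &&
			    run->ready_for_nmi_injection)
				ioctl(vcpu_fd, KVM_NMI);
			/* ... handle other exit reasons ... */
		}
	}
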