 Documentation/kvm/api.txt       | 49
 arch/x86/include/asm/kvm.h      | 28
 arch/x86/include/asm/kvm_host.h |  2
 arch/x86/kvm/svm.c              | 22
 arch/x86/kvm/vmx.c              | 30
 arch/x86/kvm/x86.c              | 77
 include/linux/kvm.h             |  6
 7 files changed, 214 insertions, 0 deletions
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index 36594ba57723..e1a114161027 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -653,6 +653,55 @@ struct kvm_clock_data {
 	__u32 pad[9];
 };
 
+4.29 KVM_GET_VCPU_EVENTS
+
+Capability: KVM_CAP_VCPU_EVENTS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_vcpu_events (out)
+Returns: 0 on success, -1 on error
+
+Gets currently pending exceptions, interrupts, and NMIs as well as related
+states of the vcpu.
+
+struct kvm_vcpu_events {
+	struct {
+		__u8 injected;
+		__u8 nr;
+		__u8 has_error_code;
+		__u8 pad;
+		__u32 error_code;
+	} exception;
+	struct {
+		__u8 injected;
+		__u8 nr;
+		__u8 soft;
+		__u8 pad;
+	} interrupt;
+	struct {
+		__u8 injected;
+		__u8 pending;
+		__u8 masked;
+		__u8 pad;
+	} nmi;
+	__u32 sipi_vector;
+	__u32 flags;   /* must be zero */
+};
+
+4.30 KVM_SET_VCPU_EVENTS
+
+Capability: KVM_CAP_VCPU_EVENTS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_vcpu_events (in)
+Returns: 0 on success, -1 on error
+
+Sets pending exceptions, interrupts, and NMIs as well as related states of the
+vcpu.
+
+See KVM_GET_VCPU_EVENTS for the data structure.
+
+
 5. The kvm_run structure
 
 Application code obtains a pointer to the kvm_run structure by
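For illustration only, not part of the patch: a userspace VMM could use the two new ioctls roughly as sketched below to read a vcpu's pending-event state and, for example, mark NMIs as masked. The helper name and vcpu_fd are hypothetical; they assume a vcpu file descriptor previously obtained via KVM_CREATE_VCPU.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Sketch only: read the vcpu's pending-event state, mark NMIs as masked,
 * and write the state back.  vcpu_fd is assumed to be a vcpu file
 * descriptor returned by KVM_CREATE_VCPU. */
static int mask_vcpu_nmis(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0) {
		perror("KVM_GET_VCPU_EVENTS");
		return -1;
	}

	events.nmi.masked = 1;	/* block NMI delivery until unmasked */
	events.flags = 0;	/* must be zero, see 4.30 above */

	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0) {
		perror("KVM_SET_VCPU_EVENTS");
		return -1;
	}
	return 0;
}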
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index ef9b4b73cce4..950df434763f 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -20,6 +20,7 @@
 #define __KVM_HAVE_MCE
 #define __KVM_HAVE_PIT_STATE2
 #define __KVM_HAVE_XEN_HVM
+#define __KVM_HAVE_VCPU_EVENTS
 
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
@@ -252,4 +253,31 @@ struct kvm_reinject_control {
 	__u8 pit_reinject;
 	__u8 reserved[31];
 };
+
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+	struct {
+		__u8 injected;
+		__u8 nr;
+		__u8 has_error_code;
+		__u8 pad;
+		__u32 error_code;
+	} exception;
+	struct {
+		__u8 injected;
+		__u8 nr;
+		__u8 soft;
+		__u8 pad;
+	} interrupt;
+	struct {
+		__u8 injected;
+		__u8 pending;
+		__u8 masked;
+		__u8 pad;
+	} nmi;
+	__u32 sipi_vector;
+	__u32 flags;
+	__u32 reserved[10];
+};
+
 #endif /* _ASM_X86_KVM_H */
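A note on layout: with the pad bytes in each sub-struct and the reserved[10] tail, the structure above packs to a fixed 64 bytes, which presumably leaves room for future event fields without breaking the ioctl ABI. A userspace consumer could sanity-check that reading at build time; the assertion below is illustrative only and not part of the patch.

#include <linux/kvm.h>

/* Illustrative only: 8 (exception) + 4 (interrupt) + 4 (nmi)
 * + 4 (sipi_vector) + 4 (flags) + 40 (reserved[10]) = 64 bytes,
 * with no compiler-inserted padding. */
_Static_assert(sizeof(struct kvm_vcpu_events) == 64,
	       "unexpected struct kvm_vcpu_events layout");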
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 26a74b7bb6bc..06e085614dad 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -523,6 +523,8 @@ struct kvm_x86_ops {
 				bool has_error_code, u32 error_code);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
+	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 34b700f9e498..3de0b37ec038 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2499,6 +2499,26 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
 }
 
+static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
+}
+
+static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (masked) {
+		svm->vcpu.arch.hflags |= HF_NMI_MASK;
+		svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+	} else {
+		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
+		svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+	}
+}
+
 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -2946,6 +2966,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.queue_exception = svm_queue_exception,
 	.interrupt_allowed = svm_interrupt_allowed,
 	.nmi_allowed = svm_nmi_allowed,
+	.get_nmi_mask = svm_get_nmi_mask,
+	.set_nmi_mask = svm_set_nmi_mask,
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 22fcd27a0b58..778f059ae423 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2639,6 +2639,34 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 				GUEST_INTR_STATE_NMI));
 }
 
+static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+	if (!cpu_has_virtual_nmis())
+		return to_vmx(vcpu)->soft_vnmi_blocked;
+	else
+		return !!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+			  GUEST_INTR_STATE_NMI);
+}
+
+static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!cpu_has_virtual_nmis()) {
+		if (vmx->soft_vnmi_blocked != masked) {
+			vmx->soft_vnmi_blocked = masked;
+			vmx->vnmi_blocked_time = 0;
+		}
+	} else {
+		if (masked)
+			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+				      GUEST_INTR_STATE_NMI);
+		else
+			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+					GUEST_INTR_STATE_NMI);
+	}
+}
+
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
@@ -3985,6 +4013,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.queue_exception = vmx_queue_exception,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.nmi_allowed = vmx_nmi_allowed,
+	.get_nmi_mask = vmx_get_nmi_mask,
+	.set_nmi_mask = vmx_set_nmi_mask,
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ba8958dca3c4..35eea30821d6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1342,6 +1342,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
 	case KVM_CAP_XEN_HVM:
 	case KVM_CAP_ADJUST_CLOCK:
+	case KVM_CAP_VCPU_EVENTS:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -1883,6 +1884,61 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
+					       struct kvm_vcpu_events *events)
+{
+	vcpu_load(vcpu);
+
+	events->exception.injected = vcpu->arch.exception.pending;
+	events->exception.nr = vcpu->arch.exception.nr;
+	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.error_code = vcpu->arch.exception.error_code;
+
+	events->interrupt.injected = vcpu->arch.interrupt.pending;
+	events->interrupt.nr = vcpu->arch.interrupt.nr;
+	events->interrupt.soft = vcpu->arch.interrupt.soft;
+
+	events->nmi.injected = vcpu->arch.nmi_injected;
+	events->nmi.pending = vcpu->arch.nmi_pending;
+	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+
+	events->sipi_vector = vcpu->arch.sipi_vector;
+
+	events->flags = 0;
+
+	vcpu_put(vcpu);
+}
+
+static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
+					      struct kvm_vcpu_events *events)
+{
+	if (events->flags)
+		return -EINVAL;
+
+	vcpu_load(vcpu);
+
+	vcpu->arch.exception.pending = events->exception.injected;
+	vcpu->arch.exception.nr = events->exception.nr;
+	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
+	vcpu->arch.exception.error_code = events->exception.error_code;
+
+	vcpu->arch.interrupt.pending = events->interrupt.injected;
+	vcpu->arch.interrupt.nr = events->interrupt.nr;
+	vcpu->arch.interrupt.soft = events->interrupt.soft;
+	if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
+		kvm_pic_clear_isr_ack(vcpu->kvm);
+
+	vcpu->arch.nmi_injected = events->nmi.injected;
+	vcpu->arch.nmi_pending = events->nmi.pending;
+	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
+
+	vcpu->arch.sipi_vector = events->sipi_vector;
+
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
@@ -2040,6 +2096,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
 		break;
 	}
+	case KVM_GET_VCPU_EVENTS: {
+		struct kvm_vcpu_events events;
+
+		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
+
+		r = -EFAULT;
+		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
+			break;
+		r = 0;
+		break;
+	}
+	case KVM_SET_VCPU_EVENTS: {
+		struct kvm_vcpu_events events;
+
+		r = -EFAULT;
+		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
+			break;
+
+		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
+		break;
+	}
 	default:
 		r = -EINVAL;
 	}
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 976f4d181858..92045a92d714 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -489,6 +489,9 @@ struct kvm_ioeventfd {
 #endif
 #define KVM_CAP_ADJUST_CLOCK 39
 #define KVM_CAP_INTERNAL_ERROR_DATA 40
+#ifdef __KVM_HAVE_VCPU_EVENTS
+#define KVM_CAP_VCPU_EVENTS 41
+#endif
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -672,6 +675,9 @@ struct kvm_clock_data {
 /* IA64 stack access */
 #define KVM_IA64_VCPU_GET_STACK   _IOR(KVMIO,  0x9a, void *)
 #define KVM_IA64_VCPU_SET_STACK   _IOW(KVMIO,  0x9b, void *)
+/* Available with KVM_CAP_VCPU_EVENTS */
+#define KVM_GET_VCPU_EVENTS       _IOR(KVMIO,  0x9f, struct kvm_vcpu_events)
+#define KVM_SET_VCPU_EVENTS       _IOW(KVMIO,  0xa0, struct kvm_vcpu_events)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 
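Because KVM_CAP_VCPU_EVENTS is only defined when the architecture sets __KVM_HAVE_VCPU_EVENTS, and older kernels will not report it, a portable VMM would probe the capability at runtime before issuing the new ioctls. A minimal sketch follows; the helper name and kvm_fd, an open /dev/kvm descriptor, are assumptions for illustration.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdbool.h>

/* Sketch only: return true if this kernel (and these headers) support the
 * vcpu-events API.  kvm_fd is assumed to be an open /dev/kvm descriptor. */
static bool have_vcpu_events(int kvm_fd)
{
#ifdef KVM_CAP_VCPU_EVENTS
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VCPU_EVENTS) > 0;
#else
	return false;	/* headers predate KVM_CAP_VCPU_EVENTS */
#endif
}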