author    Jan Kiszka <jan.kiszka@web.de>    2009-11-11 19:04:25 -0500
committer Avi Kivity <avi@redhat.com>       2009-12-03 02:32:25 -0500
commit    3cfc3092f40bc37c57ba556cfd8de4218f2135ab
tree      562d61febfe7d3c99ea08e376b3f3c016cff613c /arch/x86
parent    65ac7264043740572ba804edca03c374d70427c9
KVM: x86: Add KVM_GET/SET_VCPU_EVENTS
This new IOCTL exports all states related to exceptions, interrupts, and NMIs that were previously invisible to user space. Together with the corresponding user space changes, this fixes sporadic problems with VM save/restore, live migration, and system reset.

[avi: future-proof the ABI by adding a flags field]

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
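As a usage illustration only (not part of this patch): a minimal user-space sketch of how the new ioctls might be driven around save/restore, assuming a KVM system fd and a vcpu fd already opened via /dev/kvm and KVM_CREATE_VCPU, and a <linux/kvm.h> that provides KVM_CAP_VCPU_EVENTS, KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS. The helper name is hypothetical.

	/* Hypothetical helper: read back pending event state and reinject it,
	 * e.g. across a migration or a reset of the same vcpu. */
	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int sync_vcpu_events(int kvm_fd, int vcpu_fd)
	{
		struct kvm_vcpu_events events;

		/* The capability must be advertised before the ioctls are used. */
		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VCPU_EVENTS) <= 0)
			return -1;

		/* Fetch exception/interrupt/NMI state and the SIPI vector. */
		memset(&events, 0, sizeof(events));
		if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
			return -1;

		/* ... transfer 'events' alongside the other vcpu state ... */

		/* Restore it; flags must be 0 with this version of the ABI. */
		events.flags = 0;
		if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
			return -1;

		return 0;
	}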
Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/kvm.h      | 28 +
 arch/x86/include/asm/kvm_host.h |  2 +
 arch/x86/kvm/svm.c              | 22 +
 arch/x86/kvm/vmx.c              | 30 +
 arch/x86/kvm/x86.c              | 77 +
 5 files changed, 159 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index ef9b4b73cce4..950df434763f 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -20,6 +20,7 @@
 #define __KVM_HAVE_MCE
 #define __KVM_HAVE_PIT_STATE2
 #define __KVM_HAVE_XEN_HVM
+#define __KVM_HAVE_VCPU_EVENTS

 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
@@ -252,4 +253,31 @@ struct kvm_reinject_control {
 	__u8 pit_reinject;
 	__u8 reserved[31];
 };
+
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+	struct {
+		__u8 injected;
+		__u8 nr;
+		__u8 has_error_code;
+		__u8 pad;
+		__u32 error_code;
+	} exception;
+	struct {
+		__u8 injected;
+		__u8 nr;
+		__u8 soft;
+		__u8 pad;
+	} interrupt;
+	struct {
+		__u8 injected;
+		__u8 pending;
+		__u8 masked;
+		__u8 pad;
+	} nmi;
+	__u32 sipi_vector;
+	__u32 flags;
+	__u32 reserved[10];
+};
+
 #endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 26a74b7bb6bc..06e085614dad 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -523,6 +523,8 @@ struct kvm_x86_ops {
 				bool has_error_code, u32 error_code);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
+	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 34b700f9e498..3de0b37ec038 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2499,6 +2499,26 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
 }

+static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
+}
+
+static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (masked) {
+		svm->vcpu.arch.hflags |= HF_NMI_MASK;
+		svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+	} else {
+		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
+		svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+	}
+}
+
 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -2946,6 +2966,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.queue_exception = svm_queue_exception,
 	.interrupt_allowed = svm_interrupt_allowed,
 	.nmi_allowed = svm_nmi_allowed,
+	.get_nmi_mask = svm_get_nmi_mask,
+	.set_nmi_mask = svm_set_nmi_mask,
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 22fcd27a0b58..778f059ae423 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2639,6 +2639,34 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 			GUEST_INTR_STATE_NMI));
 }

+static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+	if (!cpu_has_virtual_nmis())
+		return to_vmx(vcpu)->soft_vnmi_blocked;
+	else
+		return !!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+			  GUEST_INTR_STATE_NMI);
+}
+
+static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!cpu_has_virtual_nmis()) {
+		if (vmx->soft_vnmi_blocked != masked) {
+			vmx->soft_vnmi_blocked = masked;
+			vmx->vnmi_blocked_time = 0;
+		}
+	} else {
+		if (masked)
+			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+				      GUEST_INTR_STATE_NMI);
+		else
+			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+					GUEST_INTR_STATE_NMI);
+	}
+}
+
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
@@ -3985,6 +4013,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.queue_exception = vmx_queue_exception,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.nmi_allowed = vmx_nmi_allowed,
+	.get_nmi_mask = vmx_get_nmi_mask,
+	.set_nmi_mask = vmx_set_nmi_mask,
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ba8958dca3c4..35eea30821d6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1342,6 +1342,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
 	case KVM_CAP_XEN_HVM:
 	case KVM_CAP_ADJUST_CLOCK:
+	case KVM_CAP_VCPU_EVENTS:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -1883,6 +1884,61 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
 	return 0;
 }

+static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
+					       struct kvm_vcpu_events *events)
+{
+	vcpu_load(vcpu);
+
+	events->exception.injected = vcpu->arch.exception.pending;
+	events->exception.nr = vcpu->arch.exception.nr;
+	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.error_code = vcpu->arch.exception.error_code;
+
+	events->interrupt.injected = vcpu->arch.interrupt.pending;
+	events->interrupt.nr = vcpu->arch.interrupt.nr;
+	events->interrupt.soft = vcpu->arch.interrupt.soft;
+
+	events->nmi.injected = vcpu->arch.nmi_injected;
+	events->nmi.pending = vcpu->arch.nmi_pending;
+	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+
+	events->sipi_vector = vcpu->arch.sipi_vector;
+
+	events->flags = 0;
+
+	vcpu_put(vcpu);
+}
+
+static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
+					      struct kvm_vcpu_events *events)
+{
+	if (events->flags)
+		return -EINVAL;
+
+	vcpu_load(vcpu);
+
+	vcpu->arch.exception.pending = events->exception.injected;
+	vcpu->arch.exception.nr = events->exception.nr;
+	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
+	vcpu->arch.exception.error_code = events->exception.error_code;
+
+	vcpu->arch.interrupt.pending = events->interrupt.injected;
+	vcpu->arch.interrupt.nr = events->interrupt.nr;
+	vcpu->arch.interrupt.soft = events->interrupt.soft;
+	if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
+		kvm_pic_clear_isr_ack(vcpu->kvm);
+
+	vcpu->arch.nmi_injected = events->nmi.injected;
+	vcpu->arch.nmi_pending = events->nmi.pending;
+	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
+
+	vcpu->arch.sipi_vector = events->sipi_vector;
+
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
@@ -2040,6 +2096,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
 		break;
 	}
+	case KVM_GET_VCPU_EVENTS: {
+		struct kvm_vcpu_events events;
+
+		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
+
+		r = -EFAULT;
+		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
+			break;
+		r = 0;
+		break;
+	}
+	case KVM_SET_VCPU_EVENTS: {
+		struct kvm_vcpu_events events;
+
+		r = -EFAULT;
+		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
+			break;
+
+		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
+		break;
+	}
 	default:
 		r = -EINVAL;
 	}