about summary refs log tree commit diff stats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorJan Kiszka <jan.kiszka@siemens.com>2010-02-19 13:38:07 -0500
committerAvi Kivity <avi@redhat.com>2010-04-25 05:38:28 -0400
commit48005f64d0ea965d454e38b5181af4aba9bdef5b (patch)
tree15aa4fe79716e3089893c8e9d48d7e0b898d2693 /arch/x86/kvm/x86.c
parent03b82a30ea8b26199901b219848d706dbd70c609 (diff)
KVM: x86: Save&restore interrupt shadow mask
The interrupt shadow created by STI or MOV-SS-like operations is part of the VCPU state and must be preserved across migration. Transfer it in the spare padding field of kvm_vcpu_events.interrupt. As a side effect we now have to make vmx_set_interrupt_shadow robust against both shadow types being set. Give MOV SS a higher priority and skip STI in that case to avoid that VMX throws a fault on next entry. Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b1c9f2fb8dd..84ffd95ee198 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2111,6 +2111,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
 	events->interrupt.nr = vcpu->arch.interrupt.nr;
 	events->interrupt.soft = 0;
+	events->interrupt.shadow =
+		kvm_x86_ops->get_interrupt_shadow(vcpu,
+			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
 
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
@@ -2119,7 +2122,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+			 | KVM_VCPUEVENT_VALID_SHADOW);
 
 	vcpu_put(vcpu);
 }
@@ -2128,7 +2132,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
-			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+			      | KVM_VCPUEVENT_VALID_SHADOW))
 		return -EINVAL;
 
 	vcpu_load(vcpu);
@@ -2143,6 +2148,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	vcpu->arch.interrupt.soft = events->interrupt.soft;
 	if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
 		kvm_pic_clear_isr_ack(vcpu->kvm);
+	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
+		kvm_x86_ops->set_interrupt_shadow(vcpu,
+						  events->interrupt.shadow);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
 	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)