diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2010-04-22 06:33:13 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-05-17 05:19:26 -0400 |
commit | ce7ddec4bbbc08f0c2901cc103773aed864b09fd (patch) | |
tree | d1cc4b3d0cd472ff30c3c796e6537dd41ec4e268 /arch/x86/kvm/x86.c | |
parent | c2c63a493924e09a1984d1374a0e60dfd54fc0b0 (diff) |
KVM: x86: Allow marking an exception as reinjected
This patch adds logic to kvm/x86 which allows to mark an
injected exception as reinjected. This allows to remove an
ugly hack from svm_complete_interrupts that prevented
exceptions from being reinjected at all in the nested case.
The hack was necessary because a reinjected exception into
the nested guest could cause a nested vmexit emulation. But
reinjected exceptions must not be intercepted. The downside of
the hack is that an exception that is injected could get
lost.
This patch fixes the problem and puts the code for it into
generic x86 files because Nested-VMX will likely have the
same problem and could reuse the code.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 23 |
1 files changed, 19 insertions, 4 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6e6434332f2..6b2ce1d2d74 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -265,7 +265,8 @@ static int exception_class(int vector) | |||
265 | } | 265 | } |
266 | 266 | ||
267 | static void kvm_multiple_exception(struct kvm_vcpu *vcpu, | 267 | static void kvm_multiple_exception(struct kvm_vcpu *vcpu, |
268 | unsigned nr, bool has_error, u32 error_code) | 268 | unsigned nr, bool has_error, u32 error_code, |
269 | bool reinject) | ||
269 | { | 270 | { |
270 | u32 prev_nr; | 271 | u32 prev_nr; |
271 | int class1, class2; | 272 | int class1, class2; |
@@ -276,6 +277,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, | |||
276 | vcpu->arch.exception.has_error_code = has_error; | 277 | vcpu->arch.exception.has_error_code = has_error; |
277 | vcpu->arch.exception.nr = nr; | 278 | vcpu->arch.exception.nr = nr; |
278 | vcpu->arch.exception.error_code = error_code; | 279 | vcpu->arch.exception.error_code = error_code; |
280 | vcpu->arch.exception.reinject = true; | ||
279 | return; | 281 | return; |
280 | } | 282 | } |
281 | 283 | ||
@@ -304,10 +306,16 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, | |||
304 | 306 | ||
305 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) | 307 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) |
306 | { | 308 | { |
307 | kvm_multiple_exception(vcpu, nr, false, 0); | 309 | kvm_multiple_exception(vcpu, nr, false, 0, false); |
308 | } | 310 | } |
309 | EXPORT_SYMBOL_GPL(kvm_queue_exception); | 311 | EXPORT_SYMBOL_GPL(kvm_queue_exception); |
310 | 312 | ||
313 | void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) | ||
314 | { | ||
315 | kvm_multiple_exception(vcpu, nr, false, 0, true); | ||
316 | } | ||
317 | EXPORT_SYMBOL_GPL(kvm_requeue_exception); | ||
318 | |||
311 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, | 319 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, |
312 | u32 error_code) | 320 | u32 error_code) |
313 | { | 321 | { |
@@ -324,10 +332,16 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi); | |||
324 | 332 | ||
325 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) | 333 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) |
326 | { | 334 | { |
327 | kvm_multiple_exception(vcpu, nr, true, error_code); | 335 | kvm_multiple_exception(vcpu, nr, true, error_code, false); |
328 | } | 336 | } |
329 | EXPORT_SYMBOL_GPL(kvm_queue_exception_e); | 337 | EXPORT_SYMBOL_GPL(kvm_queue_exception_e); |
330 | 338 | ||
339 | void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) | ||
340 | { | ||
341 | kvm_multiple_exception(vcpu, nr, true, error_code, true); | ||
342 | } | ||
343 | EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); | ||
344 | |||
331 | /* | 345 | /* |
332 | * Checks if cpl <= required_cpl; if true, return true. Otherwise queue | 346 | * Checks if cpl <= required_cpl; if true, return true. Otherwise queue |
333 | * a #GP and return false. | 347 | * a #GP and return false. |
@@ -4408,7 +4422,8 @@ static void inject_pending_event(struct kvm_vcpu *vcpu) | |||
4408 | vcpu->arch.exception.error_code); | 4422 | vcpu->arch.exception.error_code); |
4409 | kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, | 4423 | kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, |
4410 | vcpu->arch.exception.has_error_code, | 4424 | vcpu->arch.exception.has_error_code, |
4411 | vcpu->arch.exception.error_code); | 4425 | vcpu->arch.exception.error_code, |
4426 | vcpu->arch.exception.reinject); | ||
4412 | return; | 4427 | return; |
4413 | } | 4428 | } |
4414 | 4429 | ||