diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2010-04-22 06:33:13 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-05-17 05:19:26 -0400 |
commit | ce7ddec4bbbc08f0c2901cc103773aed864b09fd (patch) | |
tree | d1cc4b3d0cd472ff30c3c796e6537dd41ec4e268 /arch/x86 | |
parent | c2c63a493924e09a1984d1374a0e60dfd54fc0b0 (diff) |
KVM: x86: Allow marking an exception as reinjected
This patch adds logic to kvm/x86 which allows marking an
injected exception as reinjected. This makes it possible to
remove an ugly hack from svm_complete_interrupts that
prevented exceptions from being reinjected at all in the
nested case.
The hack was necessary because a reinjected exception into
the nested guest could cause a nested vmexit emulation. But
reinjected exceptions must not be intercepted. The downside
of the hack is that an exception that is injected could get
lost.
This patch fixes the problem and puts the code for it into
generic x86 files, because Nested-VMX will likely have the
same problem and could reuse the code.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/include/asm/kvm_host.h | 6 | ||||
-rw-r--r-- | arch/x86/kvm/svm.c | 12 | ||||
-rw-r--r-- | arch/x86/kvm/vmx.c | 3 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 23 |
4 files changed, 32 insertions, 12 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 357573af974f..3f0007b076da 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -312,6 +312,7 @@ struct kvm_vcpu_arch { | |||
312 | struct kvm_queued_exception { | 312 | struct kvm_queued_exception { |
313 | bool pending; | 313 | bool pending; |
314 | bool has_error_code; | 314 | bool has_error_code; |
315 | bool reinject; | ||
315 | u8 nr; | 316 | u8 nr; |
316 | u32 error_code; | 317 | u32 error_code; |
317 | } exception; | 318 | } exception; |
@@ -514,7 +515,8 @@ struct kvm_x86_ops { | |||
514 | void (*set_irq)(struct kvm_vcpu *vcpu); | 515 | void (*set_irq)(struct kvm_vcpu *vcpu); |
515 | void (*set_nmi)(struct kvm_vcpu *vcpu); | 516 | void (*set_nmi)(struct kvm_vcpu *vcpu); |
516 | void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, | 517 | void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, |
517 | bool has_error_code, u32 error_code); | 518 | bool has_error_code, u32 error_code, |
519 | bool reinject); | ||
518 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); | 520 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); |
519 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); | 521 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); |
520 | bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); | 522 | bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); |
@@ -617,6 +619,8 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | |||
617 | 619 | ||
618 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); | 620 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); |
619 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | 621 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); |
622 | void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr); | ||
623 | void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | ||
620 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, | 624 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, |
621 | u32 error_code); | 625 | u32 error_code); |
622 | bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); | 626 | bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 65fc11438b75..30e49fe7f8c0 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -338,7 +338,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
338 | } | 338 | } |
339 | 339 | ||
340 | static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | 340 | static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, |
341 | bool has_error_code, u32 error_code) | 341 | bool has_error_code, u32 error_code, |
342 | bool reinject) | ||
342 | { | 343 | { |
343 | struct vcpu_svm *svm = to_svm(vcpu); | 344 | struct vcpu_svm *svm = to_svm(vcpu); |
344 | 345 | ||
@@ -346,7 +347,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | |||
346 | * If we are within a nested VM we'd better #VMEXIT and let the guest | 347 | * If we are within a nested VM we'd better #VMEXIT and let the guest |
347 | * handle the exception | 348 | * handle the exception |
348 | */ | 349 | */ |
349 | if (nested_svm_check_exception(svm, nr, has_error_code, error_code)) | 350 | if (!reinject && |
351 | nested_svm_check_exception(svm, nr, has_error_code, error_code)) | ||
350 | return; | 352 | return; |
351 | 353 | ||
352 | if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) { | 354 | if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) { |
@@ -2918,8 +2920,6 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) | |||
2918 | svm->vcpu.arch.nmi_injected = true; | 2920 | svm->vcpu.arch.nmi_injected = true; |
2919 | break; | 2921 | break; |
2920 | case SVM_EXITINTINFO_TYPE_EXEPT: | 2922 | case SVM_EXITINTINFO_TYPE_EXEPT: |
2921 | if (is_nested(svm)) | ||
2922 | break; | ||
2923 | /* | 2923 | /* |
2924 | * In case of software exceptions, do not reinject the vector, | 2924 | * In case of software exceptions, do not reinject the vector, |
2925 | * but re-execute the instruction instead. Rewind RIP first | 2925 | * but re-execute the instruction instead. Rewind RIP first |
@@ -2935,10 +2935,10 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) | |||
2935 | } | 2935 | } |
2936 | if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { | 2936 | if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { |
2937 | u32 err = svm->vmcb->control.exit_int_info_err; | 2937 | u32 err = svm->vmcb->control.exit_int_info_err; |
2938 | kvm_queue_exception_e(&svm->vcpu, vector, err); | 2938 | kvm_requeue_exception_e(&svm->vcpu, vector, err); |
2939 | 2939 | ||
2940 | } else | 2940 | } else |
2941 | kvm_queue_exception(&svm->vcpu, vector); | 2941 | kvm_requeue_exception(&svm->vcpu, vector); |
2942 | break; | 2942 | break; |
2943 | case SVM_EXITINTINFO_TYPE_INTR: | 2943 | case SVM_EXITINTINFO_TYPE_INTR: |
2944 | kvm_queue_interrupt(&svm->vcpu, vector, false); | 2944 | kvm_queue_interrupt(&svm->vcpu, vector, false); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9f8532b1fa9a..875b785228f6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -919,7 +919,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
919 | } | 919 | } |
920 | 920 | ||
921 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | 921 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, |
922 | bool has_error_code, u32 error_code) | 922 | bool has_error_code, u32 error_code, |
923 | bool reinject) | ||
923 | { | 924 | { |
924 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 925 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
925 | u32 intr_info = nr | INTR_INFO_VALID_MASK; | 926 | u32 intr_info = nr | INTR_INFO_VALID_MASK; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6e6434332f21..6b2ce1d2d748 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -265,7 +265,8 @@ static int exception_class(int vector) | |||
265 | } | 265 | } |
266 | 266 | ||
267 | static void kvm_multiple_exception(struct kvm_vcpu *vcpu, | 267 | static void kvm_multiple_exception(struct kvm_vcpu *vcpu, |
268 | unsigned nr, bool has_error, u32 error_code) | 268 | unsigned nr, bool has_error, u32 error_code, |
269 | bool reinject) | ||
269 | { | 270 | { |
270 | u32 prev_nr; | 271 | u32 prev_nr; |
271 | int class1, class2; | 272 | int class1, class2; |
@@ -276,6 +277,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, | |||
276 | vcpu->arch.exception.has_error_code = has_error; | 277 | vcpu->arch.exception.has_error_code = has_error; |
277 | vcpu->arch.exception.nr = nr; | 278 | vcpu->arch.exception.nr = nr; |
278 | vcpu->arch.exception.error_code = error_code; | 279 | vcpu->arch.exception.error_code = error_code; |
280 | vcpu->arch.exception.reinject = true; | ||
279 | return; | 281 | return; |
280 | } | 282 | } |
281 | 283 | ||
@@ -304,10 +306,16 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, | |||
304 | 306 | ||
305 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) | 307 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) |
306 | { | 308 | { |
307 | kvm_multiple_exception(vcpu, nr, false, 0); | 309 | kvm_multiple_exception(vcpu, nr, false, 0, false); |
308 | } | 310 | } |
309 | EXPORT_SYMBOL_GPL(kvm_queue_exception); | 311 | EXPORT_SYMBOL_GPL(kvm_queue_exception); |
310 | 312 | ||
313 | void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) | ||
314 | { | ||
315 | kvm_multiple_exception(vcpu, nr, false, 0, true); | ||
316 | } | ||
317 | EXPORT_SYMBOL_GPL(kvm_requeue_exception); | ||
318 | |||
311 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, | 319 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, |
312 | u32 error_code) | 320 | u32 error_code) |
313 | { | 321 | { |
@@ -324,10 +332,16 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi); | |||
324 | 332 | ||
325 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) | 333 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) |
326 | { | 334 | { |
327 | kvm_multiple_exception(vcpu, nr, true, error_code); | 335 | kvm_multiple_exception(vcpu, nr, true, error_code, false); |
328 | } | 336 | } |
329 | EXPORT_SYMBOL_GPL(kvm_queue_exception_e); | 337 | EXPORT_SYMBOL_GPL(kvm_queue_exception_e); |
330 | 338 | ||
339 | void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) | ||
340 | { | ||
341 | kvm_multiple_exception(vcpu, nr, true, error_code, true); | ||
342 | } | ||
343 | EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); | ||
344 | |||
331 | /* | 345 | /* |
332 | * Checks if cpl <= required_cpl; if true, return true. Otherwise queue | 346 | * Checks if cpl <= required_cpl; if true, return true. Otherwise queue |
333 | * a #GP and return false. | 347 | * a #GP and return false. |
@@ -4408,7 +4422,8 @@ static void inject_pending_event(struct kvm_vcpu *vcpu) | |||
4408 | vcpu->arch.exception.error_code); | 4422 | vcpu->arch.exception.error_code); |
4409 | kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, | 4423 | kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, |
4410 | vcpu->arch.exception.has_error_code, | 4424 | vcpu->arch.exception.has_error_code, |
4411 | vcpu->arch.exception.error_code); | 4425 | vcpu->arch.exception.error_code, |
4426 | vcpu->arch.exception.reinject); | ||
4412 | return; | 4427 | return; |
4413 | } | 4428 | } |
4414 | 4429 | ||