author     Jan Kiszka <jan.kiszka@siemens.com>    2008-12-15 07:52:10 -0500
committer  Avi Kivity <avi@redhat.com>            2009-03-24 05:02:48 -0400
commit     8ab2d2e231062814bd89bba2d6d92563190aa2bb
tree       7091abac4d05e6fc74bd8462cbd0fdaf06ec37e0 /arch
parent     d80174745ba3938bc6abb8f95ed670ac0631a182
KVM: VMX: Support for injecting software exceptions
VMX differentiates between processor-generated and software-generated exceptions
when injecting them into the guest. Extend vmx_queue_exception
accordingly (and refactor related constants) so that we can use this
service reliably for the new guest debugging framework.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
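
The heart of the change is how the VM-entry interruption-information word is built. Below is a minimal standalone sketch, with the constants redefined locally using the values from this patch; build_intr_info() is an illustrative helper, not a kernel function.

/*
 * Standalone sketch (not kernel code): assembling the VM-entry
 * interruption-information field for a hardware vs. a software exception.
 */
#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_VECTOR_MASK        0xffu        /* bits 7:0, exception vector */
#define INTR_TYPE_HARD_EXCEPTION     (3u << 8)    /* processor exception */
#define INTR_TYPE_SOFT_EXCEPTION     (6u << 8)    /* software exception (INT3, INTO) */
#define INTR_INFO_DELIVER_CODE_MASK  (1u << 11)   /* push an error code */
#define INTR_INFO_VALID_MASK         (1u << 31)   /* injection is valid */

#define BP_VECTOR 3    /* #BP, raised by INT3 */
#define OF_VECTOR 4    /* #OF, raised by INTO */
#define GP_VECTOR 13   /* #GP, carries an error code */

/* Build the 32-bit injection word the way vmx_queue_exception() now does. */
static uint32_t build_intr_info(unsigned int nr, int has_error_code)
{
        uint32_t intr_info = nr | INTR_INFO_VALID_MASK;

        if (has_error_code)
                intr_info |= INTR_INFO_DELIVER_CODE_MASK;

        /* INT3/INTO are software exceptions; everything else is hardware. */
        if (nr == BP_VECTOR || nr == OF_VECTOR)
                intr_info |= INTR_TYPE_SOFT_EXCEPTION;
        else
                intr_info |= INTR_TYPE_HARD_EXCEPTION;

        return intr_info;
}

int main(void)
{
        printf("#BP: 0x%08x\n", (unsigned)build_intr_info(BP_VECTOR, 0)); /* soft exception */
        printf("#GP: 0x%08x\n", (unsigned)build_intr_info(GP_VECTOR, 1)); /* hard, with error code */
        return 0;
}

Folding the valid bit and the error-code bit into intr_info up front lets the real-mode and protected-mode paths share the same word and differ only in the type bits. INT3 (#BP) and INTO (#OF) are injected as software exceptions, which is also why the patch programs VM_ENTRY_INSTRUCTION_LEN for them.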
Diffstat (limited to 'arch')
 arch/x86/include/asm/vmx.h |  3
 arch/x86/kvm/vmx.c         | 35
 2 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index d0238e6151d8..32159f034efc 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -270,8 +270,9 @@ enum vmcs_field {
 
 #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
 #define INTR_TYPE_NMI_INTR              (2 << 8) /* NMI */
-#define INTR_TYPE_EXCEPTION             (3 << 8) /* processor exception */
+#define INTR_TYPE_HARD_EXCEPTION        (3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
+#define INTR_TYPE_SOFT_EXCEPTION        (6 << 8) /* software exception */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI            0x00000001
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7611af576829..1d974c1eaa7d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -189,21 +189,21 @@ static inline int is_page_fault(u32 intr_info)
 {
         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                              INTR_INFO_VALID_MASK)) ==
-                (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
+                (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
 static inline int is_no_device(u32 intr_info)
 {
         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                              INTR_INFO_VALID_MASK)) ==
-                (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
+                (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
 }
 
 static inline int is_invalid_opcode(u32 intr_info)
 {
         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                              INTR_INFO_VALID_MASK)) ==
-                (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
+                (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
 }
 
 static inline int is_external_interrupt(u32 intr_info)
@@ -747,29 +747,33 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                 bool has_error_code, u32 error_code)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
+        u32 intr_info = nr | INTR_INFO_VALID_MASK;
 
-        if (has_error_code)
+        if (has_error_code) {
                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+                intr_info |= INTR_INFO_DELIVER_CODE_MASK;
+        }
 
         if (vcpu->arch.rmode.active) {
                 vmx->rmode.irq.pending = true;
                 vmx->rmode.irq.vector = nr;
                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
-                if (nr == BP_VECTOR)
+                if (nr == BP_VECTOR || nr == OF_VECTOR)
                         vmx->rmode.irq.rip++;
-                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-                             nr | INTR_TYPE_SOFT_INTR
-                             | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
-                             | INTR_INFO_VALID_MASK);
+                intr_info |= INTR_TYPE_SOFT_INTR;
+                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
                 return;
         }
 
-        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-                     nr | INTR_TYPE_EXCEPTION
-                     | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
-                     | INTR_INFO_VALID_MASK);
+        if (nr == BP_VECTOR || nr == OF_VECTOR) {
+                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
+                intr_info |= INTR_TYPE_SOFT_EXCEPTION;
+        } else
+                intr_info |= INTR_TYPE_HARD_EXCEPTION;
+
+        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
 }
 
 static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
@@ -2650,7 +2654,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         }
 
         if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
-            (INTR_TYPE_EXCEPTION | 1)) {
+            (INTR_TYPE_HARD_EXCEPTION | 1)) {
                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
                 return 0;
         }
@@ -3238,7 +3242,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
                 vmx->vcpu.arch.nmi_injected = false;
         }
         kvm_clear_exception_queue(&vmx->vcpu);
-        if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) {
+        if (idtv_info_valid && (type == INTR_TYPE_HARD_EXCEPTION ||
+            type == INTR_TYPE_SOFT_EXCEPTION)) {
                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
                         error = vmcs_read32(IDT_VECTORING_ERROR_CODE);
                         kvm_queue_exception_e(&vmx->vcpu, vector, error);
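
On the completion path, the same type field is decoded from the IDT-vectoring information: vmx_complete_interrupts() must now recognize both encodings when deciding whether a partially delivered exception has to be requeued. A rough sketch of that check, with the constants again defined locally (is_exception_type() is illustrative, not a kernel helper):

#include <stdint.h>

#define INTR_INFO_INTR_TYPE_MASK   0x700u        /* bits 10:8, event type */
#define INTR_TYPE_HARD_EXCEPTION   (3u << 8)
#define INTR_TYPE_SOFT_EXCEPTION   (6u << 8)

/* Does this IDT-vectoring info describe an exception (hard or soft)? */
int is_exception_type(uint32_t idt_vectoring_info)
{
        uint32_t type = idt_vectoring_info & INTR_INFO_INTR_TYPE_MASK;

        return type == INTR_TYPE_HARD_EXCEPTION ||
               type == INTR_TYPE_SOFT_EXCEPTION;
}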