diff options
author | Avi Kivity <avi@qumranet.com> | 2007-11-25 06:41:11 -0500 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2008-01-30 10:53:18 -0500 |
commit | 298101da2f507c13eaf179ee4507a7c0fe3e7b06 (patch) | |
tree | 2c0808964e5bc04812f0379b945fb187aaf901eb | |
parent | 4bf8ed8dd2781a5e7603a83f8ee1d4f5aa04ebc4 (diff) |
KVM: Generalize exception injection mechanism
Instead of each subarch doing its own thing, add an API for queuing an
injection, and manage failed exception injection centrally (i.e., if
an inject failed due to a shadow page fault, we need to requeue it).
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r-- | drivers/kvm/svm.c | 21 | ||||
-rw-r--r-- | drivers/kvm/vmx.c | 20 | ||||
-rw-r--r-- | drivers/kvm/x86.c | 33 | ||||
-rw-r--r-- | drivers/kvm/x86.h | 13 |
4 files changed, 86 insertions, 1 deletions
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index c75c6b65b651..87072c647f28 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c | |||
@@ -188,6 +188,25 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
188 | vcpu->shadow_efer = efer; | 188 | vcpu->shadow_efer = efer; |
189 | } | 189 | } |
190 | 190 | ||
191 | static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | ||
192 | bool has_error_code, u32 error_code) | ||
193 | { | ||
194 | struct vcpu_svm *svm = to_svm(vcpu); | ||
195 | |||
196 | svm->vmcb->control.event_inj = nr | ||
197 | | SVM_EVTINJ_VALID | ||
198 | | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) | ||
199 | | SVM_EVTINJ_TYPE_EXEPT; | ||
200 | svm->vmcb->control.event_inj_err = error_code; | ||
201 | } | ||
202 | |||
203 | static bool svm_exception_injected(struct kvm_vcpu *vcpu) | ||
204 | { | ||
205 | struct vcpu_svm *svm = to_svm(vcpu); | ||
206 | |||
207 | return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID); | ||
208 | } | ||
209 | |||
191 | static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) | 210 | static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) |
192 | { | 211 | { |
193 | struct vcpu_svm *svm = to_svm(vcpu); | 212 | struct vcpu_svm *svm = to_svm(vcpu); |
@@ -1712,6 +1731,8 @@ static struct kvm_x86_ops svm_x86_ops = { | |||
1712 | .patch_hypercall = svm_patch_hypercall, | 1731 | .patch_hypercall = svm_patch_hypercall, |
1713 | .get_irq = svm_get_irq, | 1732 | .get_irq = svm_get_irq, |
1714 | .set_irq = svm_set_irq, | 1733 | .set_irq = svm_set_irq, |
1734 | .queue_exception = svm_queue_exception, | ||
1735 | .exception_injected = svm_exception_injected, | ||
1715 | .inject_pending_irq = svm_intr_assist, | 1736 | .inject_pending_irq = svm_intr_assist, |
1716 | .inject_pending_vectors = do_interrupt_requests, | 1737 | .inject_pending_vectors = do_interrupt_requests, |
1717 | 1738 | ||
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index fc5e7c8381ce..f382956f176a 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c | |||
@@ -595,6 +595,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
595 | vcpu->interrupt_window_open = 1; | 595 | vcpu->interrupt_window_open = 1; |
596 | } | 596 | } |
597 | 597 | ||
598 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | ||
599 | bool has_error_code, u32 error_code) | ||
600 | { | ||
601 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
602 | nr | INTR_TYPE_EXCEPTION | ||
603 | | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0) | ||
604 | | INTR_INFO_VALID_MASK); | ||
605 | if (has_error_code) | ||
606 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); | ||
607 | } | ||
608 | |||
609 | static bool vmx_exception_injected(struct kvm_vcpu *vcpu) | ||
610 | { | ||
611 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
612 | |||
613 | return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); | ||
614 | } | ||
615 | |||
598 | static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) | 616 | static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) |
599 | { | 617 | { |
600 | printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n", | 618 | printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n", |
@@ -2641,6 +2659,8 @@ static struct kvm_x86_ops vmx_x86_ops = { | |||
2641 | .patch_hypercall = vmx_patch_hypercall, | 2659 | .patch_hypercall = vmx_patch_hypercall, |
2642 | .get_irq = vmx_get_irq, | 2660 | .get_irq = vmx_get_irq, |
2643 | .set_irq = vmx_inject_irq, | 2661 | .set_irq = vmx_inject_irq, |
2662 | .queue_exception = vmx_queue_exception, | ||
2663 | .exception_injected = vmx_exception_injected, | ||
2644 | .inject_pending_irq = vmx_intr_assist, | 2664 | .inject_pending_irq = vmx_intr_assist, |
2645 | .inject_pending_vectors = do_interrupt_requests, | 2665 | .inject_pending_vectors = do_interrupt_requests, |
2646 | 2666 | ||
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c index c9e4b67bfb1b..11440d12a2d3 100644 --- a/drivers/kvm/x86.c +++ b/drivers/kvm/x86.c | |||
@@ -133,6 +133,32 @@ static void inject_gp(struct kvm_vcpu *vcpu) | |||
133 | kvm_x86_ops->inject_gp(vcpu, 0); | 133 | kvm_x86_ops->inject_gp(vcpu, 0); |
134 | } | 134 | } |
135 | 135 | ||
136 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) | ||
137 | { | ||
138 | WARN_ON(vcpu->exception.pending); | ||
139 | vcpu->exception.pending = true; | ||
140 | vcpu->exception.has_error_code = false; | ||
141 | vcpu->exception.nr = nr; | ||
142 | } | ||
143 | EXPORT_SYMBOL_GPL(kvm_queue_exception); | ||
144 | |||
145 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) | ||
146 | { | ||
147 | WARN_ON(vcpu->exception.pending); | ||
148 | vcpu->exception.pending = true; | ||
149 | vcpu->exception.has_error_code = true; | ||
150 | vcpu->exception.nr = nr; | ||
151 | vcpu->exception.error_code = error_code; | ||
152 | } | ||
153 | EXPORT_SYMBOL_GPL(kvm_queue_exception_e); | ||
154 | |||
155 | static void __queue_exception(struct kvm_vcpu *vcpu) | ||
156 | { | ||
157 | kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr, | ||
158 | vcpu->exception.has_error_code, | ||
159 | vcpu->exception.error_code); | ||
160 | } | ||
161 | |||
136 | /* | 162 | /* |
137 | * Load the pae pdptrs. Return true is they are all valid. | 163 | * Load the pae pdptrs. Return true is they are all valid. |
138 | */ | 164 | */ |
@@ -2370,7 +2396,9 @@ again: | |||
2370 | goto out; | 2396 | goto out; |
2371 | } | 2397 | } |
2372 | 2398 | ||
2373 | if (irqchip_in_kernel(vcpu->kvm)) | 2399 | if (vcpu->exception.pending) |
2400 | __queue_exception(vcpu); | ||
2401 | else if (irqchip_in_kernel(vcpu->kvm)) | ||
2374 | kvm_x86_ops->inject_pending_irq(vcpu); | 2402 | kvm_x86_ops->inject_pending_irq(vcpu); |
2375 | else | 2403 | else |
2376 | kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run); | 2404 | kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run); |
@@ -2409,6 +2437,9 @@ again: | |||
2409 | profile_hit(KVM_PROFILING, (void *)vcpu->rip); | 2437 | profile_hit(KVM_PROFILING, (void *)vcpu->rip); |
2410 | } | 2438 | } |
2411 | 2439 | ||
2440 | if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu)) | ||
2441 | vcpu->exception.pending = false; | ||
2442 | |||
2412 | r = kvm_x86_ops->handle_exit(kvm_run, vcpu); | 2443 | r = kvm_x86_ops->handle_exit(kvm_run, vcpu); |
2413 | 2444 | ||
2414 | if (r > 0) { | 2445 | if (r > 0) { |
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h index eed796402e3b..1e71668694ea 100644 --- a/drivers/kvm/x86.h +++ b/drivers/kvm/x86.h | |||
@@ -139,6 +139,13 @@ struct kvm_vcpu { | |||
139 | struct kvm_pio_request pio; | 139 | struct kvm_pio_request pio; |
140 | void *pio_data; | 140 | void *pio_data; |
141 | 141 | ||
142 | struct kvm_queued_exception { | ||
143 | bool pending; | ||
144 | bool has_error_code; | ||
145 | u8 nr; | ||
146 | u32 error_code; | ||
147 | } exception; | ||
148 | |||
142 | struct { | 149 | struct { |
143 | int active; | 150 | int active; |
144 | u8 save_iopl; | 151 | u8 save_iopl; |
@@ -224,6 +231,9 @@ struct kvm_x86_ops { | |||
224 | unsigned char *hypercall_addr); | 231 | unsigned char *hypercall_addr); |
225 | int (*get_irq)(struct kvm_vcpu *vcpu); | 232 | int (*get_irq)(struct kvm_vcpu *vcpu); |
226 | void (*set_irq)(struct kvm_vcpu *vcpu, int vec); | 233 | void (*set_irq)(struct kvm_vcpu *vcpu, int vec); |
234 | void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, | ||
235 | bool has_error_code, u32 error_code); | ||
236 | bool (*exception_injected)(struct kvm_vcpu *vcpu); | ||
227 | void (*inject_pending_irq)(struct kvm_vcpu *vcpu); | 237 | void (*inject_pending_irq)(struct kvm_vcpu *vcpu); |
228 | void (*inject_pending_vectors)(struct kvm_vcpu *vcpu, | 238 | void (*inject_pending_vectors)(struct kvm_vcpu *vcpu, |
229 | struct kvm_run *run); | 239 | struct kvm_run *run); |
@@ -294,6 +304,9 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); | |||
294 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | 304 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); |
295 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); | 305 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); |
296 | 306 | ||
307 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); | ||
308 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | ||
309 | |||
297 | void fx_init(struct kvm_vcpu *vcpu); | 310 | void fx_init(struct kvm_vcpu *vcpu); |
298 | 311 | ||
299 | int emulator_read_std(unsigned long addr, | 312 | int emulator_read_std(unsigned long addr, |