author    Jan Kiszka <jan.kiszka@siemens.com>  2008-12-15 07:52:10 -0500
committer Avi Kivity <avi@redhat.com>          2009-03-24 05:02:49 -0400
commit    d0bfb940ecabf0b44fb1fd80d8d60594e569e5ec (patch)
tree      b5927d44937a54ec23d2a28d59db06a0262c0412 /arch/x86/include/asm/kvm_host.h
parent    8ab2d2e231062814bd89bba2d6d92563190aa2bb (diff)
KVM: New guest debug interface
This rips out the support for KVM_DEBUG_GUEST and introduces a new IOCTL instead: KVM_SET_GUEST_DEBUG. The IOCTL payload consists of a generic part, controlling the "main switch" and the single-step feature. The arch-specific part adds an x86 interface for intercepting both types of debug exceptions separately and re-injecting them when the host was not interested. Moreover, the foundation for guest debugging via debug registers is laid.

To signal breakpoint events properly back to userland, an arch-specific data block is now returned along with KVM_EXIT_DEBUG. For x86, the arch block contains the PC, the debug exception, and the relevant debug registers, so that debug events can be told apart properly.

The availability of this new interface is signaled by KVM_CAP_SET_GUEST_DEBUG. Empty stubs are provided for archs that do not support it yet.

Note that both SVM and VMX are supported, but only the latter has been tested so far. Based on the experience with all those VMX corner cases, I would be fairly surprised if SVM worked out of the box.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
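[Editor's note] To illustrate how userland is expected to drive the new interface, here is a minimal sketch (not part of this patch): it assumes a vCPU fd and its mmap'ed struct kvm_run are already set up, that KVM_CAP_SET_GUEST_DEBUG was confirmed via KVM_CHECK_EXTENSION, and uses the struct kvm_guest_debug / kvm_run->debug.arch UAPI definitions this series introduces.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: enable single-stepping on an already-created vCPU and
 * decode the x86 arch block delivered with KVM_EXIT_DEBUG. */
static int single_step_once(int vcpu_fd, struct kvm_run *run)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	/* The new ioctl replacing KVM_DEBUG_GUEST. */
	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
		return -1;

	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_DEBUG) {
		/* Arch block: PC, exception number, debug registers. */
		printf("debug exit: pc=0x%llx exception=%u\n",
		       (unsigned long long)run->debug.arch.pc,
		       run->debug.arch.exception);
	}
	return 0;
}

Hardware breakpoints would additionally fill the arch part of struct kvm_guest_debug (the x86 debug register block), building on the debug register foundation mentioned above.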
Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 9 +-
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 53779309514a..c430cd580ee2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -135,12 +135,6 @@ enum {
 
 #define KVM_NR_MEM_OBJS 40
 
-struct kvm_guest_debug {
-	int enabled;
-	unsigned long bp[4];
-	int singlestep;
-};
-
 /*
  * We don't want allocation failures within the mmu code, so we preallocate
  * enough memory for a single page fault in a cache.
@@ -448,8 +442,7 @@ struct kvm_x86_ops {
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 
 	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
-			       struct kvm_debug_guest *dbg);
-	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
+			       struct kvm_guest_debug *dbg);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
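[Editor's note] The arch-private struct kvm_guest_debug deleted above is superseded by a UAPI layout of roughly this shape, per the definitions this series adds to include/linux/kvm.h and arch/x86/include/asm/kvm.h (shown here for context; it is not part of this file's diff):

/* Generic part (include/linux/kvm.h). */
#define KVM_GUESTDBG_ENABLE	0x00000001
#define KVM_GUESTDBG_SINGLESTEP	0x00000002

struct kvm_guest_debug {
	__u32 control;
	__u32 pad;
	struct kvm_guest_debug_arch arch;
};

/* x86 arch part (arch/x86/include/asm/kvm.h). */
struct kvm_guest_debug_arch {
	__u64 debugreg[8];
};

/* Returned with KVM_EXIT_DEBUG to tell debug events apart. */
struct kvm_debug_exit_arch {
	__u32 exception;
	__u32 pad;
	__u64 pc;
	__u64 dr6;
	__u64 dr7;
};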