path: root/include
author	Avi Kivity <avi@qumranet.com>	2007-02-21 11:04:26 -0500
committer	Avi Kivity <avi@qumranet.com>	2007-03-04 04:12:42 -0500
commit	bccf2150fe62dda5fb09efa2f64d2a234694eb48 (patch)
tree	b5e6fc6440b864ddd1c32c4cee1916a0c5484c63 /include
parent	c5ea76600653b1a242321734435cb1c54778941a (diff)
KVM: Per-vcpu inodes
Allocate a distinct inode for every vcpu in a VM. This has the following benefits:

 - the filp cachelines are no longer bounced when f_count is incremented on
   every ioctl()
 - the API and internal code are distinctly clearer; for example, on the
   KVM_GET_REGS ioctl, there is no need to copy the vcpu number from userspace
   and then copy the registers back; the vcpu identity is derived from the fd
   used to make the call

Right now the performance benefits are completely theoretical since (a) we
don't support more than one vcpu per VM and (b) virtualization hardware
inefficiencies completely overwhelm any cacheline bouncing effects. But both
of these will change, and we need to prepare the API today.

Signed-off-by: Avi Kivity <avi@qumranet.com>
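For illustration only (not part of the patch), a minimal userspace sketch of the new flow: the vcpu slot is passed once to KVM_CREATE_VCPU on the VM fd, and all later register accesses go through the returned vcpu fd, so struct kvm_regs no longer carries a vcpu field. The vm_fd below is assumed to have been obtained earlier (e.g. from a KVM_CREATE_VM ioctl on /dev/kvm); dump_vcpu_rip is a hypothetical helper name.

	/* Hypothetical usage sketch of the per-vcpu fd API described above. */
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int dump_vcpu_rip(int vm_fd)
	{
		struct kvm_regs regs;
		int vcpu_fd;

		/* The vcpu slot is given once, at creation; the ioctl returns a vcpu fd. */
		vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
		if (vcpu_fd < 0)
			return -1;

		/* No vcpu number to copy in: the vcpu identity comes from the fd itself. */
		memset(&regs, 0, sizeof(regs));
		if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
			return -1;

		printf("rip = 0x%llx\n", (unsigned long long)regs.rip);
		return 0;
	}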
Diffstat (limited to 'include')
-rw-r--r--	include/linux/kvm.h	38
1 file changed, 17 insertions, 21 deletions
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index d6e6635dbec1..7c9a4004af44 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -52,11 +52,10 @@ enum kvm_exit_reason {
 /* for KVM_RUN */
 struct kvm_run {
 	/* in */
-	__u32 vcpu;
 	__u32 emulated;  /* skip current instruction */
 	__u32 mmio_completed; /* mmio request completed */
 	__u8 request_interrupt_window;
-	__u8 padding1[3];
+	__u8 padding1[7];
 
 	/* out */
 	__u32 exit_type;
@@ -111,10 +110,6 @@ struct kvm_run {
 
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
-	/* in */
-	__u32 vcpu;
-	__u32 padding;
-
 	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
 	__u64 rax, rbx, rcx, rdx;
 	__u64 rsi, rdi, rsp, rbp;
@@ -141,10 +136,6 @@ struct kvm_dtable {
 
 /* for KVM_GET_SREGS and KVM_SET_SREGS */
 struct kvm_sregs {
-	/* in */
-	__u32 vcpu;
-	__u32 padding;
-
 	/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
 	struct kvm_segment cs, ds, es, fs, gs, ss;
 	struct kvm_segment tr, ldt;
@@ -163,8 +154,8 @@ struct kvm_msr_entry {
 
 /* for KVM_GET_MSRS and KVM_SET_MSRS */
 struct kvm_msrs {
-	__u32 vcpu;
 	__u32 nmsrs; /* number of msrs in entries */
+	__u32 pad;
 
 	struct kvm_msr_entry entries[0];
 };
@@ -179,8 +170,6 @@ struct kvm_msr_list {
 struct kvm_translation {
 	/* in */
 	__u64 linear_address;
-	__u32 vcpu;
-	__u32 padding;
 
 	/* out */
 	__u64 physical_address;
@@ -193,7 +182,6 @@ struct kvm_translation {
 /* for KVM_INTERRUPT */
 struct kvm_interrupt {
 	/* in */
-	__u32 vcpu;
 	__u32 irq;
 };
 
@@ -206,8 +194,8 @@ struct kvm_breakpoint {
 /* for KVM_DEBUG_GUEST */
 struct kvm_debug_guest {
 	/* int */
-	__u32 vcpu;
 	__u32 enabled;
+	__u32 pad;
 	struct kvm_breakpoint breakpoints[4];
 	__u32 singlestep;
 };
@@ -234,18 +222,26 @@ struct kvm_dirty_log {
 /*
  * ioctls for VM fds
  */
+#define KVM_SET_MEMORY_REGION     _IOW(KVMIO, 10, struct kvm_memory_region)
+/*
+ * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
+ * a vcpu fd.
+ */
+#define KVM_CREATE_VCPU           _IOW(KVMIO, 11, int)
+#define KVM_GET_DIRTY_LOG         _IOW(KVMIO, 12, struct kvm_dirty_log)
+
+/*
+ * ioctls for vcpu fds
+ */
 #define KVM_RUN                   _IOWR(KVMIO, 2, struct kvm_run)
-#define KVM_GET_REGS              _IOWR(KVMIO, 3, struct kvm_regs)
+#define KVM_GET_REGS              _IOR(KVMIO, 3, struct kvm_regs)
 #define KVM_SET_REGS              _IOW(KVMIO, 4, struct kvm_regs)
-#define KVM_GET_SREGS             _IOWR(KVMIO, 5, struct kvm_sregs)
+#define KVM_GET_SREGS             _IOR(KVMIO, 5, struct kvm_sregs)
 #define KVM_SET_SREGS             _IOW(KVMIO, 6, struct kvm_sregs)
 #define KVM_TRANSLATE             _IOWR(KVMIO, 7, struct kvm_translation)
 #define KVM_INTERRUPT             _IOW(KVMIO, 8, struct kvm_interrupt)
 #define KVM_DEBUG_GUEST           _IOW(KVMIO, 9, struct kvm_debug_guest)
-#define KVM_SET_MEMORY_REGION     _IOW(KVMIO, 10, struct kvm_memory_region)
-#define KVM_CREATE_VCPU           _IOW(KVMIO, 11, int /* vcpu_slot */)
-#define KVM_GET_DIRTY_LOG         _IOW(KVMIO, 12, struct kvm_dirty_log)
 #define KVM_GET_MSRS              _IOWR(KVMIO, 13, struct kvm_msrs)
-#define KVM_SET_MSRS              _IOWR(KVMIO, 14, struct kvm_msrs)
+#define KVM_SET_MSRS              _IOW(KVMIO, 14, struct kvm_msrs)
 
 #endif