diff options
author | Carsten Otte <cotte@de.ibm.com> | 2007-10-11 13:16:52 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2008-01-30 10:52:52 -0500 |
commit | 313a3dc75da20630e549441932a7654223f8d72a (patch) | |
tree | 96af61f06c198834d49c886476ed30b418d853c0 /drivers/kvm/x86.c | |
parent | c4fcc2724628c6548748ec80a90b548fc300e81f (diff) |
KVM: Portability: split kvm_vcpu_ioctl
This patch splits kvm_vcpu_ioctl into architecture-independent parts, and
x86 specific parts which go to kvm_arch_vcpu_ioctl in x86.c.
Common ioctls for all architectures are:
KVM_RUN, KVM_GET/SET_(S-)REGS, KVM_TRANSLATE, KVM_INTERRUPT,
KVM_DEBUG_GUEST, KVM_SET_SIGNAL_MASK, KVM_GET/SET_FPU
Note that some PPC chips don't have an FPU, so we might need an #ifdef
around KVM_GET/SET_FPU one day.
x86 specific ioctls are:
KVM_GET/SET_LAPIC, KVM_SET_CPUID, KVM_GET/SET_MSRS
An interesting aspect is vcpu_load/vcpu_put. We now have a common
vcpu_load/put which does the preemption stuff, and an architecture
specific kvm_arch_vcpu_load/put. In the x86 case, this one calls the
vmx/svm function defined in kvm_x86_ops.
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/x86.c')
-rw-r--r-- | drivers/kvm/x86.c | 219 |
1 files changed, 219 insertions, 0 deletions
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c index 437902cf178d..1fe209dd4caf 100644 --- a/drivers/kvm/x86.c +++ b/drivers/kvm/x86.c | |||
@@ -14,10 +14,18 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include "kvm.h" | ||
17 | #include "x86.h" | 18 | #include "x86.h" |
19 | #include "irq.h" | ||
20 | |||
21 | #include <linux/kvm.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/vmalloc.h> | ||
18 | 24 | ||
19 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
20 | 26 | ||
27 | #define MAX_IO_MSRS 256 | ||
28 | |||
21 | /* | 29 | /* |
22 | * List of msr numbers which we expose to userspace through KVM_GET_MSRS | 30 | * List of msr numbers which we expose to userspace through KVM_GET_MSRS |
23 | * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. | 31 | * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. |
@@ -40,6 +48,86 @@ static u32 emulated_msrs[] = { | |||
40 | MSR_IA32_MISC_ENABLE, | 48 | MSR_IA32_MISC_ENABLE, |
41 | }; | 49 | }; |
42 | 50 | ||
51 | /* | ||
52 | * Adapt set_msr() to msr_io()'s calling convention | ||
53 | */ | ||
54 | static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) | ||
55 | { | ||
56 | return kvm_set_msr(vcpu, index, *data); | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * Read or write a bunch of msrs. All parameters are kernel addresses. | ||
61 | * | ||
62 | * @return number of msrs set successfully. | ||
63 | */ | ||
64 | static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, | ||
65 | struct kvm_msr_entry *entries, | ||
66 | int (*do_msr)(struct kvm_vcpu *vcpu, | ||
67 | unsigned index, u64 *data)) | ||
68 | { | ||
69 | int i; | ||
70 | |||
71 | vcpu_load(vcpu); | ||
72 | |||
73 | for (i = 0; i < msrs->nmsrs; ++i) | ||
74 | if (do_msr(vcpu, entries[i].index, &entries[i].data)) | ||
75 | break; | ||
76 | |||
77 | vcpu_put(vcpu); | ||
78 | |||
79 | return i; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Read or write a bunch of msrs. Parameters are user addresses. | ||
84 | * | ||
85 | * @return number of msrs set successfully. | ||
86 | */ | ||
87 | static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, | ||
88 | int (*do_msr)(struct kvm_vcpu *vcpu, | ||
89 | unsigned index, u64 *data), | ||
90 | int writeback) | ||
91 | { | ||
92 | struct kvm_msrs msrs; | ||
93 | struct kvm_msr_entry *entries; | ||
94 | int r, n; | ||
95 | unsigned size; | ||
96 | |||
97 | r = -EFAULT; | ||
98 | if (copy_from_user(&msrs, user_msrs, sizeof msrs)) | ||
99 | goto out; | ||
100 | |||
101 | r = -E2BIG; | ||
102 | if (msrs.nmsrs >= MAX_IO_MSRS) | ||
103 | goto out; | ||
104 | |||
105 | r = -ENOMEM; | ||
106 | size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; | ||
107 | entries = vmalloc(size); | ||
108 | if (!entries) | ||
109 | goto out; | ||
110 | |||
111 | r = -EFAULT; | ||
112 | if (copy_from_user(entries, user_msrs->entries, size)) | ||
113 | goto out_free; | ||
114 | |||
115 | r = n = __msr_io(vcpu, &msrs, entries, do_msr); | ||
116 | if (r < 0) | ||
117 | goto out_free; | ||
118 | |||
119 | r = -EFAULT; | ||
120 | if (writeback && copy_to_user(user_msrs->entries, entries, size)) | ||
121 | goto out_free; | ||
122 | |||
123 | r = n; | ||
124 | |||
125 | out_free: | ||
126 | vfree(entries); | ||
127 | out: | ||
128 | return r; | ||
129 | } | ||
130 | |||
43 | long kvm_arch_dev_ioctl(struct file *filp, | 131 | long kvm_arch_dev_ioctl(struct file *filp, |
44 | unsigned int ioctl, unsigned long arg) | 132 | unsigned int ioctl, unsigned long arg) |
45 | { | 133 | { |
@@ -81,6 +169,137 @@ out: | |||
81 | return r; | 169 | return r; |
82 | } | 170 | } |
83 | 171 | ||
/*
 * Arch hook invoked by the generic vcpu_load(): delegate to the
 * vendor implementation (vmx/svm) through kvm_x86_ops.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
}
176 | |||
/*
 * Arch hook invoked by the generic vcpu_put(): delegate to the
 * vendor implementation (vmx/svm) through kvm_x86_ops.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
}
181 | |||
182 | static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) | ||
183 | { | ||
184 | u64 efer; | ||
185 | int i; | ||
186 | struct kvm_cpuid_entry *e, *entry; | ||
187 | |||
188 | rdmsrl(MSR_EFER, efer); | ||
189 | entry = NULL; | ||
190 | for (i = 0; i < vcpu->cpuid_nent; ++i) { | ||
191 | e = &vcpu->cpuid_entries[i]; | ||
192 | if (e->function == 0x80000001) { | ||
193 | entry = e; | ||
194 | break; | ||
195 | } | ||
196 | } | ||
197 | if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) { | ||
198 | entry->edx &= ~(1 << 20); | ||
199 | printk(KERN_INFO "kvm: guest NX capability removed\n"); | ||
200 | } | ||
201 | } | ||
202 | |||
203 | static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, | ||
204 | struct kvm_cpuid *cpuid, | ||
205 | struct kvm_cpuid_entry __user *entries) | ||
206 | { | ||
207 | int r; | ||
208 | |||
209 | r = -E2BIG; | ||
210 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) | ||
211 | goto out; | ||
212 | r = -EFAULT; | ||
213 | if (copy_from_user(&vcpu->cpuid_entries, entries, | ||
214 | cpuid->nent * sizeof(struct kvm_cpuid_entry))) | ||
215 | goto out; | ||
216 | vcpu->cpuid_nent = cpuid->nent; | ||
217 | cpuid_fix_nx_cap(vcpu); | ||
218 | return 0; | ||
219 | |||
220 | out: | ||
221 | return r; | ||
222 | } | ||
223 | |||
/*
 * KVM_GET_LAPIC worker: snapshot the in-kernel local APIC register
 * page into the userspace-visible kvm_lapic_state buffer.
 * Always returns 0.
 */
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	/*
	 * NOTE(review): copies sizeof *s bytes out of apic->regs — correct
	 * only if kvm_lapic_state consists solely of its regs array;
	 * sizeof s->regs would state the intent more directly. Confirm
	 * against the struct definition.
	 */
	memcpy(s->regs, vcpu->apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}
233 | |||
/*
 * KVM_SET_LAPIC worker: overwrite the in-kernel local APIC register
 * page from the userspace-supplied state, then let the APIC code
 * re-sync (kvm_apic_post_state_restore — presumably re-derives internal
 * state from the restored registers; see irq.c). Always returns 0.
 */
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	/* NOTE(review): sizeof *s — same layout assumption as the getter. */
	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}
244 | |||
245 | long kvm_arch_vcpu_ioctl(struct file *filp, | ||
246 | unsigned int ioctl, unsigned long arg) | ||
247 | { | ||
248 | struct kvm_vcpu *vcpu = filp->private_data; | ||
249 | void __user *argp = (void __user *)arg; | ||
250 | int r; | ||
251 | |||
252 | switch (ioctl) { | ||
253 | case KVM_GET_LAPIC: { | ||
254 | struct kvm_lapic_state lapic; | ||
255 | |||
256 | memset(&lapic, 0, sizeof lapic); | ||
257 | r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic); | ||
258 | if (r) | ||
259 | goto out; | ||
260 | r = -EFAULT; | ||
261 | if (copy_to_user(argp, &lapic, sizeof lapic)) | ||
262 | goto out; | ||
263 | r = 0; | ||
264 | break; | ||
265 | } | ||
266 | case KVM_SET_LAPIC: { | ||
267 | struct kvm_lapic_state lapic; | ||
268 | |||
269 | r = -EFAULT; | ||
270 | if (copy_from_user(&lapic, argp, sizeof lapic)) | ||
271 | goto out; | ||
272 | r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);; | ||
273 | if (r) | ||
274 | goto out; | ||
275 | r = 0; | ||
276 | break; | ||
277 | } | ||
278 | case KVM_SET_CPUID: { | ||
279 | struct kvm_cpuid __user *cpuid_arg = argp; | ||
280 | struct kvm_cpuid cpuid; | ||
281 | |||
282 | r = -EFAULT; | ||
283 | if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) | ||
284 | goto out; | ||
285 | r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); | ||
286 | if (r) | ||
287 | goto out; | ||
288 | break; | ||
289 | } | ||
290 | case KVM_GET_MSRS: | ||
291 | r = msr_io(vcpu, argp, kvm_get_msr, 1); | ||
292 | break; | ||
293 | case KVM_SET_MSRS: | ||
294 | r = msr_io(vcpu, argp, do_set_msr, 0); | ||
295 | break; | ||
296 | default: | ||
297 | r = -EINVAL; | ||
298 | } | ||
299 | out: | ||
300 | return r; | ||
301 | } | ||
302 | |||
84 | static __init void kvm_init_msr_list(void) | 303 | static __init void kvm_init_msr_list(void) |
85 | { | 304 | { |
86 | u32 dummy[2]; | 305 | u32 dummy[2]; |