author		Avi Kivity <avi@qumranet.com>	2007-12-16 04:13:16 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 11:01:18 -0500
commit		0fce5623ba248d3af0d7fb719d5ac367cc9d5192
tree		b31021dcec46616c2c4f997b2d05d4879619d7e2 /virt/kvm
parent		edf884172e9828c6234b254208af04655855038d
KVM: Move drivers/kvm/* to virt/kvm/
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/iodev.h	63
-rw-r--r--	virt/kvm/kvm_main.c	1393
2 files changed, 1456 insertions, 0 deletions
diff --git a/virt/kvm/iodev.h b/virt/kvm/iodev.h
new file mode 100644
index 000000000000..c14e642027b2
--- /dev/null
+++ b/virt/kvm/iodev.h
@@ -0,0 +1,63 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __KVM_IODEV_H__
#define __KVM_IODEV_H__

#include <linux/kvm_types.h>

struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}

#endif /* __KVM_IODEV_H__ */
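For orientation only, not part of this commit: iodev.h declares the in-kernel I/O device interface that kvm_main.c's kvm_io_bus later dispatches guest MMIO/PIO accesses to. Below is a minimal sketch of a device built on that interface; the "scratch register" device, its struct, and the registration helper are hypothetical names chosen for illustration, assuming the usual kernel headers.

/* Illustrative sketch, not part of this commit. */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "iodev.h"

/* Hypothetical device: one 32-bit scratch register at a fixed guest address. */
struct scratch_dev {
	u32 value;	/* register contents */
	gpa_t base;	/* guest-physical address the device claims */
};

static void scratch_read(struct kvm_io_device *this, gpa_t addr, int len,
			 void *val)
{
	struct scratch_dev *s = this->private;

	if (len > (int)sizeof(s->value))
		len = sizeof(s->value);
	memcpy(val, &s->value, len);
}

static void scratch_write(struct kvm_io_device *this, gpa_t addr, int len,
			  const void *val)
{
	struct scratch_dev *s = this->private;

	if (len > (int)sizeof(s->value))
		len = sizeof(s->value);
	memcpy(&s->value, val, len);
}

static int scratch_in_range(struct kvm_io_device *this, gpa_t addr)
{
	struct scratch_dev *s = this->private;

	return addr >= s->base && addr < s->base + sizeof(s->value);
}

static void scratch_destructor(struct kvm_io_device *this)
{
	kfree(this->private);
}

/* Wire up the callbacks and attach to the VM's MMIO bus (see kvm_main.c). */
static void scratch_attach(struct kvm *kvm, struct kvm_io_device *dev,
			   struct scratch_dev *s)
{
	dev->read = scratch_read;
	dev->write = scratch_write;
	dev->in_range = scratch_in_range;
	dev->destructor = scratch_destructor;
	dev->private = s;
	kvm_io_bus_register_dev(&kvm->mmio_bus, dev);
}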
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
new file mode 100644
index 000000000000..4026d7d64296
--- /dev/null
+++ b/virt/kvm/kvm_main.c
@@ -0,0 +1,1393 @@
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = __gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && !kvm_arch_vcpu_runnable(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm *kvm = vma->vm_file->private_data;
	struct page *page;

	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
		return VM_FAULT_SIGBUS;
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, vmf->pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return VM_FAULT_SIGBUS;
	}
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static u64 vm_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		total += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM] = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);
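For orientation only, not part of this commit: a minimal userspace sketch of the ioctl paths implemented above, in rough order of use: kvm_dev_ioctl() on /dev/kvm, kvm_vm_ioctl() on the VM fd, and kvm_vcpu_ioctl()/kvm_vcpu_mmap() on the vcpu fd. Guest register setup and all error handling are omitted; the constants and structures come from <linux/kvm.h>.

/* Illustrative sketch, not part of this commit. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	/* System ioctls, handled by kvm_dev_ioctl(). */
	int api = ioctl(kvm, KVM_GET_API_VERSION, 0);
	int vm  = ioctl(kvm, KVM_CREATE_VM, 0);	/* kvm_dev_ioctl_create_vm() */

	/* VM ioctls, handled by kvm_vm_ioctl(). */
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0,
		.memory_size = 0x10000,
		.userspace_addr = (unsigned long)mmap(NULL, 0x10000,
				PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_ANONYMOUS, -1, 0),
	};
	ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);	/* __kvm_set_memory_region() */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* kvm_vm_ioctl_create_vcpu() */

	/* The shared kvm_run area is faulted in by kvm_vcpu_fault(). */
	int sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);	/* 2 * PAGE_SIZE here */
	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu, 0);

	/* vcpu ioctl; enters kvm_arch_vcpu_ioctl_run(). */
	ioctl(vcpu, KVM_RUN, 0);

	return (api >= 0 && run != MAP_FAILED) ? 0 : 1;
}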