author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-12-09 01:14:38 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-12-09 01:14:38 -0500
commit     bcd6acd51f3d4d1ada201e9bc5c40a31d6d80c71 (patch)
tree       2f6dffd2d3e4dd67355a224de7e7a960335a92fd /virt/kvm/kvm_main.c
parent     11c34c7deaeeebcee342cbc35e1bb2a6711b2431 (diff)
parent     3ff6a468b45b5dfeb0e903e56f4eb27d34b2437c (diff)
Merge commit 'origin/master' into next
Conflicts:
include/linux/kvm.h
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c  961
1 file changed, 147 insertions(+), 814 deletions(-)
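
The deletions below move KVM's PCI device-assignment ioctls out of kvm_main.c; the most interesting additions make hardware virtualization support come up on demand: instead of enabling VMX/SVM on every CPU at module load, a kvm_usage_count guarded by kvm_lock enables it when the first VM is created and disables it when the last one is destroyed. A distilled userspace sketch of that refcounting pattern follows (the pthread lock, printfs, and function names are illustrative stand-ins, not kernel API — the real code uses kvm_lock and on_each_cpu()):

/*
 * Userspace model of the kvm_usage_count pattern added in this diff
 * (hardware_enable_all()/hardware_disable_all()).  Illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int usage_count;

static int enable_all(void)             /* cf. hardware_enable_all() */
{
	int r = 0;

	pthread_mutex_lock(&lock);
	if (++usage_count == 1) {
		printf("first VM: enabling virtualization on all CPUs\n");
		/* on per-CPU failure the kernel rolls the count back via
		 * hardware_disable_all_nolock() and returns -EBUSY;
		 * modeled here by the r != 0 path */
	}
	pthread_mutex_unlock(&lock);
	return r;
}

static void disable_all(void)           /* cf. hardware_disable_all() */
{
	pthread_mutex_lock(&lock);
	if (--usage_count == 0)
		printf("last VM gone: disabling virtualization\n");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	enable_all();   /* kvm_create_vm() for VM 1 */
	enable_all();   /* VM 2 only bumps the count */
	disable_all();  /* kvm_destroy_vm() for VM 2 */
	disable_all();  /* VM 1 gone: hardware goes quiet again */
	return 0;
}

The same count lets kvm_suspend()/kvm_resume() and the CPU-hotplug notifier skip work entirely while no VMs exist, as the later hunks show.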
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cdca63917e77..e1f2bf8d7b1e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -43,6 +43,7 @@
 #include <linux/swap.h>
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
+#include <linux/compat.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -54,12 +55,6 @@
 #include "coalesced_mmio.h"
 #endif
 
-#ifdef KVM_CAP_DEVICE_ASSIGNMENT
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include "irq.h"
-#endif
-
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
 
@@ -76,6 +71,8 @@ DEFINE_SPINLOCK(kvm_lock);
 LIST_HEAD(vm_list);
 
 static cpumask_var_t cpus_hardware_enabled;
+static int kvm_usage_count = 0;
+static atomic_t hardware_enable_failed;
 
 struct kmem_cache *kvm_vcpu_cache;
 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
@@ -86,615 +83,13 @@ struct dentry *kvm_debugfs_dir;
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
+static int hardware_enable_all(void);
+static void hardware_disable_all(void);
 
 static bool kvm_rebooting;
 
 static bool largepages_enabled = true;
 
-#ifdef KVM_CAP_DEVICE_ASSIGNMENT
-static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
-						      int assigned_dev_id)
-{
-	struct list_head *ptr;
-	struct kvm_assigned_dev_kernel *match;
-
-	list_for_each(ptr, head) {
-		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
-		if (match->assigned_dev_id == assigned_dev_id)
-			return match;
-	}
-	return NULL;
-}
-
-static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
-				    *assigned_dev, int irq)
-{
-	int i, index;
-	struct msix_entry *host_msix_entries;
-
-	host_msix_entries = assigned_dev->host_msix_entries;
-
-	index = -1;
-	for (i = 0; i < assigned_dev->entries_nr; i++)
-		if (irq == host_msix_entries[i].vector) {
-			index = i;
-			break;
-		}
-	if (index < 0) {
-		printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
-		return 0;
-	}
-
-	return index;
-}
-
-static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
-{
-	struct kvm_assigned_dev_kernel *assigned_dev;
-	struct kvm *kvm;
-	int i;
-
-	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
-				    interrupt_work);
-	kvm = assigned_dev->kvm;
-
-	mutex_lock(&kvm->irq_lock);
-	spin_lock_irq(&assigned_dev->assigned_dev_lock);
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		struct kvm_guest_msix_entry *guest_entries =
-			assigned_dev->guest_msix_entries;
-		for (i = 0; i < assigned_dev->entries_nr; i++) {
-			if (!(guest_entries[i].flags &
-					KVM_ASSIGNED_MSIX_PENDING))
-				continue;
-			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
-			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id,
-				    guest_entries[i].vector, 1);
-		}
-	} else
-		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-			    assigned_dev->guest_irq, 1);
-
-	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-	mutex_unlock(&assigned_dev->kvm->irq_lock);
-}
-
-static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
-{
-	unsigned long flags;
-	struct kvm_assigned_dev_kernel *assigned_dev =
-		(struct kvm_assigned_dev_kernel *) dev_id;
-
-	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		int index = find_index_from_host_irq(assigned_dev, irq);
-		if (index < 0)
-			goto out;
-		assigned_dev->guest_msix_entries[index].flags |=
-			KVM_ASSIGNED_MSIX_PENDING;
-	}
-
-	schedule_work(&assigned_dev->interrupt_work);
-
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
-		disable_irq_nosync(irq);
-		assigned_dev->host_irq_disabled = true;
-	}
-
-out:
-	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
-	return IRQ_HANDLED;
-}
-
-/* Ack the irq line for an assigned device */
-static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
-{
-	struct kvm_assigned_dev_kernel *dev;
-	unsigned long flags;
-
-	if (kian->gsi == -1)
-		return;
-
-	dev = container_of(kian, struct kvm_assigned_dev_kernel,
-			   ack_notifier);
-
-	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
-
-	/* The guest irq may be shared so this ack may be
-	 * from another device.
-	 */
-	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
-	if (dev->host_irq_disabled) {
-		enable_irq(dev->host_irq);
-		dev->host_irq_disabled = false;
-	}
-	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
-}
-
-static void deassign_guest_irq(struct kvm *kvm,
-			       struct kvm_assigned_dev_kernel *assigned_dev)
-{
-	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
-	assigned_dev->ack_notifier.gsi = -1;
-
-	if (assigned_dev->irq_source_id != -1)
-		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
-	assigned_dev->irq_source_id = -1;
-	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
-}
-
-/* The function implicit hold kvm->lock mutex due to cancel_work_sync() */
-static void deassign_host_irq(struct kvm *kvm,
-			      struct kvm_assigned_dev_kernel *assigned_dev)
-{
-	/*
-	 * In kvm_free_device_irq, cancel_work_sync return true if:
-	 * 1. work is scheduled, and then cancelled.
-	 * 2. work callback is executed.
-	 *
-	 * The first one ensured that the irq is disabled and no more events
-	 * would happen. But for the second one, the irq may be enabled (e.g.
-	 * for MSI). So we disable irq here to prevent further events.
-	 *
-	 * Notice this maybe result in nested disable if the interrupt type is
-	 * INTx, but it's OK for we are going to free it.
-	 *
-	 * If this function is a part of VM destroy, please ensure that till
-	 * now, the kvm state is still legal for probably we also have to wait
-	 * interrupt_work done.
-	 */
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		int i;
-		for (i = 0; i < assigned_dev->entries_nr; i++)
-			disable_irq_nosync(assigned_dev->
-					   host_msix_entries[i].vector);
-
-		cancel_work_sync(&assigned_dev->interrupt_work);
-
-		for (i = 0; i < assigned_dev->entries_nr; i++)
-			free_irq(assigned_dev->host_msix_entries[i].vector,
-				 (void *)assigned_dev);
-
-		assigned_dev->entries_nr = 0;
-		kfree(assigned_dev->host_msix_entries);
-		kfree(assigned_dev->guest_msix_entries);
-		pci_disable_msix(assigned_dev->dev);
-	} else {
-		/* Deal with MSI and INTx */
-		disable_irq_nosync(assigned_dev->host_irq);
-		cancel_work_sync(&assigned_dev->interrupt_work);
-
-		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
-
-		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
-			pci_disable_msi(assigned_dev->dev);
-	}
-
-	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
-}
-
-static int kvm_deassign_irq(struct kvm *kvm,
-			    struct kvm_assigned_dev_kernel *assigned_dev,
-			    unsigned long irq_requested_type)
-{
-	unsigned long guest_irq_type, host_irq_type;
-
-	if (!irqchip_in_kernel(kvm))
-		return -EINVAL;
-	/* no irq assignment to deassign */
-	if (!assigned_dev->irq_requested_type)
-		return -ENXIO;
-
-	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
-	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
-
-	if (host_irq_type)
-		deassign_host_irq(kvm, assigned_dev);
-	if (guest_irq_type)
-		deassign_guest_irq(kvm, assigned_dev);
-
-	return 0;
-}
-
-static void kvm_free_assigned_irq(struct kvm *kvm,
-				  struct kvm_assigned_dev_kernel *assigned_dev)
-{
-	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
-}
-
-static void kvm_free_assigned_device(struct kvm *kvm,
-				     struct kvm_assigned_dev_kernel
-				     *assigned_dev)
-{
-	kvm_free_assigned_irq(kvm, assigned_dev);
-
-	pci_reset_function(assigned_dev->dev);
-
-	pci_release_regions(assigned_dev->dev);
-	pci_disable_device(assigned_dev->dev);
-	pci_dev_put(assigned_dev->dev);
-
-	list_del(&assigned_dev->list);
-	kfree(assigned_dev);
-}
-
-void kvm_free_all_assigned_devices(struct kvm *kvm)
-{
-	struct list_head *ptr, *ptr2;
-	struct kvm_assigned_dev_kernel *assigned_dev;
-
-	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
-		assigned_dev = list_entry(ptr,
-					  struct kvm_assigned_dev_kernel,
-					  list);
-
-		kvm_free_assigned_device(kvm, assigned_dev);
-	}
-}
-
-static int assigned_device_enable_host_intx(struct kvm *kvm,
-					    struct kvm_assigned_dev_kernel *dev)
-{
-	dev->host_irq = dev->dev->irq;
-	/* Even though this is PCI, we don't want to use shared
-	 * interrupts. Sharing host devices with guest-assigned devices
-	 * on the same interrupt line is not a happy situation: there
-	 * are going to be long delays in accepting, acking, etc.
-	 */
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
-			0, "kvm_assigned_intx_device", (void *)dev))
-		return -EIO;
-	return 0;
-}
-
-#ifdef __KVM_HAVE_MSI
-static int assigned_device_enable_host_msi(struct kvm *kvm,
-					   struct kvm_assigned_dev_kernel *dev)
-{
-	int r;
-
-	if (!dev->dev->msi_enabled) {
-		r = pci_enable_msi(dev->dev);
-		if (r)
-			return r;
-	}
-
-	dev->host_irq = dev->dev->irq;
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
-			"kvm_assigned_msi_device", (void *)dev)) {
-		pci_disable_msi(dev->dev);
-		return -EIO;
-	}
-
-	return 0;
-}
-#endif
-
-#ifdef __KVM_HAVE_MSIX
-static int assigned_device_enable_host_msix(struct kvm *kvm,
-					    struct kvm_assigned_dev_kernel *dev)
-{
-	int i, r = -EINVAL;
-
-	/* host_msix_entries and guest_msix_entries should have been
-	 * initialized */
-	if (dev->entries_nr == 0)
-		return r;
-
-	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
-	if (r)
-		return r;
-
-	for (i = 0; i < dev->entries_nr; i++) {
-		r = request_irq(dev->host_msix_entries[i].vector,
-				kvm_assigned_dev_intr, 0,
-				"kvm_assigned_msix_device",
-				(void *)dev);
-		/* FIXME: free requested_irq's on failure */
-		if (r)
-			return r;
-	}
-
-	return 0;
-}
-
-#endif
-
-static int assigned_device_enable_guest_intx(struct kvm *kvm,
-				struct kvm_assigned_dev_kernel *dev,
-				struct kvm_assigned_irq *irq)
-{
-	dev->guest_irq = irq->guest_irq;
-	dev->ack_notifier.gsi = irq->guest_irq;
-	return 0;
-}
-
-#ifdef __KVM_HAVE_MSI
-static int assigned_device_enable_guest_msi(struct kvm *kvm,
-			struct kvm_assigned_dev_kernel *dev,
-			struct kvm_assigned_irq *irq)
-{
-	dev->guest_irq = irq->guest_irq;
-	dev->ack_notifier.gsi = -1;
-	dev->host_irq_disabled = false;
-	return 0;
-}
-#endif
-#ifdef __KVM_HAVE_MSIX
-static int assigned_device_enable_guest_msix(struct kvm *kvm,
-			struct kvm_assigned_dev_kernel *dev,
-			struct kvm_assigned_irq *irq)
-{
-	dev->guest_irq = irq->guest_irq;
-	dev->ack_notifier.gsi = -1;
-	dev->host_irq_disabled = false;
-	return 0;
-}
-#endif
-
-static int assign_host_irq(struct kvm *kvm,
-			   struct kvm_assigned_dev_kernel *dev,
-			   __u32 host_irq_type)
-{
-	int r = -EEXIST;
-
-	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
-		return r;
-
-	switch (host_irq_type) {
-	case KVM_DEV_IRQ_HOST_INTX:
-		r = assigned_device_enable_host_intx(kvm, dev);
-		break;
-#ifdef __KVM_HAVE_MSI
-	case KVM_DEV_IRQ_HOST_MSI:
-		r = assigned_device_enable_host_msi(kvm, dev);
-		break;
-#endif
-#ifdef __KVM_HAVE_MSIX
-	case KVM_DEV_IRQ_HOST_MSIX:
-		r = assigned_device_enable_host_msix(kvm, dev);
-		break;
-#endif
-	default:
-		r = -EINVAL;
-	}
-
-	if (!r)
-		dev->irq_requested_type |= host_irq_type;
-
-	return r;
-}
-
-static int assign_guest_irq(struct kvm *kvm,
-			    struct kvm_assigned_dev_kernel *dev,
-			    struct kvm_assigned_irq *irq,
-			    unsigned long guest_irq_type)
-{
-	int id;
-	int r = -EEXIST;
-
-	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
-		return r;
-
-	id = kvm_request_irq_source_id(kvm);
-	if (id < 0)
-		return id;
-
-	dev->irq_source_id = id;
-
-	switch (guest_irq_type) {
-	case KVM_DEV_IRQ_GUEST_INTX:
-		r = assigned_device_enable_guest_intx(kvm, dev, irq);
-		break;
-#ifdef __KVM_HAVE_MSI
-	case KVM_DEV_IRQ_GUEST_MSI:
-		r = assigned_device_enable_guest_msi(kvm, dev, irq);
-		break;
-#endif
-#ifdef __KVM_HAVE_MSIX
-	case KVM_DEV_IRQ_GUEST_MSIX:
-		r = assigned_device_enable_guest_msix(kvm, dev, irq);
-		break;
-#endif
-	default:
-		r = -EINVAL;
-	}
-
-	if (!r) {
-		dev->irq_requested_type |= guest_irq_type;
-		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
-	} else
-		kvm_free_irq_source_id(kvm, dev->irq_source_id);
-
-	return r;
-}
-
-/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
-static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
-				   struct kvm_assigned_irq *assigned_irq)
-{
-	int r = -EINVAL;
-	struct kvm_assigned_dev_kernel *match;
-	unsigned long host_irq_type, guest_irq_type;
-
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
-	if (!irqchip_in_kernel(kvm))
-		return r;
-
-	mutex_lock(&kvm->lock);
-	r = -ENODEV;
-	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      assigned_irq->assigned_dev_id);
-	if (!match)
-		goto out;
-
-	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
-	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
-
-	r = -EINVAL;
-	/* can only assign one type at a time */
-	if (hweight_long(host_irq_type) > 1)
-		goto out;
-	if (hweight_long(guest_irq_type) > 1)
-		goto out;
-	if (host_irq_type == 0 && guest_irq_type == 0)
-		goto out;
-
-	r = 0;
-	if (host_irq_type)
-		r = assign_host_irq(kvm, match, host_irq_type);
-	if (r)
-		goto out;
-
-	if (guest_irq_type)
-		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
-out:
-	mutex_unlock(&kvm->lock);
-	return r;
-}
-
-static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
-					 struct kvm_assigned_irq
-					 *assigned_irq)
-{
-	int r = -ENODEV;
-	struct kvm_assigned_dev_kernel *match;
-
-	mutex_lock(&kvm->lock);
-
-	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      assigned_irq->assigned_dev_id);
-	if (!match)
-		goto out;
-
-	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
-out:
-	mutex_unlock(&kvm->lock);
-	return r;
-}
-
-static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
-				      struct kvm_assigned_pci_dev *assigned_dev)
-{
-	int r = 0;
-	struct kvm_assigned_dev_kernel *match;
-	struct pci_dev *dev;
-
-	down_read(&kvm->slots_lock);
-	mutex_lock(&kvm->lock);
-
-	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      assigned_dev->assigned_dev_id);
-	if (match) {
-		/* device already assigned */
-		r = -EEXIST;
-		goto out;
-	}
-
-	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
-	if (match == NULL) {
-		printk(KERN_INFO "%s: Couldn't allocate memory\n",
-		       __func__);
-		r = -ENOMEM;
-		goto out;
-	}
-	dev = pci_get_bus_and_slot(assigned_dev->busnr,
-				   assigned_dev->devfn);
-	if (!dev) {
-		printk(KERN_INFO "%s: host device not found\n", __func__);
-		r = -EINVAL;
-		goto out_free;
-	}
-	if (pci_enable_device(dev)) {
-		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
-		r = -EBUSY;
-		goto out_put;
-	}
-	r = pci_request_regions(dev, "kvm_assigned_device");
-	if (r) {
-		printk(KERN_INFO "%s: Could not get access to device regions\n",
-		       __func__);
-		goto out_disable;
-	}
-
-	pci_reset_function(dev);
-
-	match->assigned_dev_id = assigned_dev->assigned_dev_id;
-	match->host_busnr = assigned_dev->busnr;
-	match->host_devfn = assigned_dev->devfn;
-	match->flags = assigned_dev->flags;
-	match->dev = dev;
-	spin_lock_init(&match->assigned_dev_lock);
-	match->irq_source_id = -1;
-	match->kvm = kvm;
-	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-	INIT_WORK(&match->interrupt_work,
-		  kvm_assigned_dev_interrupt_work_handler);
-
-	list_add(&match->list, &kvm->arch.assigned_dev_head);
-
-	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-		if (!kvm->arch.iommu_domain) {
-			r = kvm_iommu_map_guest(kvm);
-			if (r)
-				goto out_list_del;
-		}
-		r = kvm_assign_device(kvm, match);
-		if (r)
-			goto out_list_del;
-	}
-
-out:
-	mutex_unlock(&kvm->lock);
-	up_read(&kvm->slots_lock);
-	return r;
-out_list_del:
-	list_del(&match->list);
-	pci_release_regions(dev);
-out_disable:
-	pci_disable_device(dev);
-out_put:
-	pci_dev_put(dev);
-out_free:
-	kfree(match);
-	mutex_unlock(&kvm->lock);
-	up_read(&kvm->slots_lock);
-	return r;
-}
-#endif
-
-#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
-static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
-		struct kvm_assigned_pci_dev *assigned_dev)
-{
-	int r = 0;
-	struct kvm_assigned_dev_kernel *match;
-
-	mutex_lock(&kvm->lock);
-
-	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      assigned_dev->assigned_dev_id);
-	if (!match) {
-		printk(KERN_INFO "%s: device hasn't been assigned before, "
-		       "so cannot be deassigned\n", __func__);
-		r = -EINVAL;
-		goto out;
-	}
-
-	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
-		kvm_deassign_device(kvm, match);
-
-	kvm_free_assigned_device(kvm, match);
-
-out:
-	mutex_unlock(&kvm->lock);
-	return r;
-}
-#endif
-
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -950,6 +345,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
 
 static struct kvm *kvm_create_vm(void)
 {
+	int r = 0;
 	struct kvm *kvm = kvm_arch_create_vm();
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	struct page *page;
@@ -957,16 +353,21 @@ static struct kvm *kvm_create_vm(void)
 
 	if (IS_ERR(kvm))
 		goto out;
+
+	r = hardware_enable_all();
+	if (r)
+		goto out_err_nodisable;
+
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
-	INIT_LIST_HEAD(&kvm->irq_routing);
 	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
+	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page) {
-		kfree(kvm);
-		return ERR_PTR(-ENOMEM);
+		r = -ENOMEM;
+		goto out_err;
 	}
 	kvm->coalesced_mmio_ring =
 			(struct kvm_coalesced_mmio_ring *)page_address(page);
@@ -974,15 +375,13 @@ static struct kvm *kvm_create_vm(void)
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	{
-		int err;
 		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
-		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
-		if (err) {
+		r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+		if (r) {
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 			put_page(page);
 #endif
-			kfree(kvm);
-			return ERR_PTR(err);
+			goto out_err;
 		}
 	}
 #endif
@@ -1006,6 +405,12 @@ static struct kvm *kvm_create_vm(void)
 #endif
 out:
 	return kvm;
+
+out_err:
+	hardware_disable_all();
+out_err_nodisable:
+	kfree(kvm);
+	return ERR_PTR(r);
 }
 
 /*
@@ -1064,6 +469,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	kvm_arch_flush_shadow(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
+	hardware_disable_all();
 	mmdrop(mm);
 }
 
@@ -1690,9 +1096,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 		if (signal_pending(current))
 			break;
 
-		vcpu_put(vcpu);
 		schedule();
-		vcpu_load(vcpu);
 	}
 
 	finish_wait(&vcpu->wq, &wait);
@@ -1706,6 +1110,21 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
+{
+	ktime_t expires;
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+
+	/* Sleep for 100 us, and hope lock-holder got scheduled */
+	expires = ktime_add_ns(ktime_get(), 100000UL);
+	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+
+	finish_wait(&vcpu->wq, &wait);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
+
 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
@@ -1829,88 +1248,6 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 	return 0;
 }
 
-#ifdef __KVM_HAVE_MSIX
-static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
-				    struct kvm_assigned_msix_nr *entry_nr)
-{
-	int r = 0;
-	struct kvm_assigned_dev_kernel *adev;
-
-	mutex_lock(&kvm->lock);
-
-	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      entry_nr->assigned_dev_id);
-	if (!adev) {
-		r = -EINVAL;
-		goto msix_nr_out;
-	}
-
-	if (adev->entries_nr == 0) {
-		adev->entries_nr = entry_nr->entry_nr;
-		if (adev->entries_nr == 0 ||
-		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
-			r = -EINVAL;
-			goto msix_nr_out;
-		}
-
-		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
-						entry_nr->entry_nr,
-						GFP_KERNEL);
-		if (!adev->host_msix_entries) {
-			r = -ENOMEM;
-			goto msix_nr_out;
-		}
-		adev->guest_msix_entries = kzalloc(
-				sizeof(struct kvm_guest_msix_entry) *
-				entry_nr->entry_nr, GFP_KERNEL);
-		if (!adev->guest_msix_entries) {
-			kfree(adev->host_msix_entries);
-			r = -ENOMEM;
-			goto msix_nr_out;
-		}
-	} else /* Not allowed set MSI-X number twice */
-		r = -EINVAL;
-msix_nr_out:
-	mutex_unlock(&kvm->lock);
-	return r;
-}
-
-static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
-				       struct kvm_assigned_msix_entry *entry)
-{
-	int r = 0, i;
-	struct kvm_assigned_dev_kernel *adev;
-
-	mutex_lock(&kvm->lock);
-
-	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      entry->assigned_dev_id);
-
-	if (!adev) {
-		r = -EINVAL;
-		goto msix_entry_out;
-	}
-
-	for (i = 0; i < adev->entries_nr; i++)
-		if (adev->guest_msix_entries[i].vector == 0 ||
-		    adev->guest_msix_entries[i].entry == entry->entry) {
-			adev->guest_msix_entries[i].entry = entry->entry;
-			adev->guest_msix_entries[i].vector = entry->gsi;
-			adev->host_msix_entries[i].entry = entry->entry;
-			break;
-		}
-	if (i == adev->entries_nr) {
-		r = -ENOSPC;
-		goto msix_entry_out;
-	}
-
-msix_entry_out:
-	mutex_unlock(&kvm->lock);
-
-	return r;
-}
-#endif
-
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -2169,112 +1506,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
-#ifdef KVM_CAP_DEVICE_ASSIGNMENT
-	case KVM_ASSIGN_PCI_DEVICE: {
-		struct kvm_assigned_pci_dev assigned_dev;
-
-		r = -EFAULT;
-		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
-			goto out;
-		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
-		if (r)
-			goto out;
-		break;
-	}
-	case KVM_ASSIGN_IRQ: {
-		r = -EOPNOTSUPP;
-		break;
-	}
-#ifdef KVM_CAP_ASSIGN_DEV_IRQ
-	case KVM_ASSIGN_DEV_IRQ: {
-		struct kvm_assigned_irq assigned_irq;
-
-		r = -EFAULT;
-		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
-			goto out;
-		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
-		if (r)
-			goto out;
-		break;
-	}
-	case KVM_DEASSIGN_DEV_IRQ: {
-		struct kvm_assigned_irq assigned_irq;
-
-		r = -EFAULT;
-		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
-			goto out;
-		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
-		if (r)
-			goto out;
-		break;
-	}
-#endif
-#endif
-#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
-	case KVM_DEASSIGN_PCI_DEVICE: {
-		struct kvm_assigned_pci_dev assigned_dev;
-
-		r = -EFAULT;
-		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
-			goto out;
-		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
-		if (r)
-			goto out;
-		break;
-	}
-#endif
-#ifdef KVM_CAP_IRQ_ROUTING
-	case KVM_SET_GSI_ROUTING: {
-		struct kvm_irq_routing routing;
-		struct kvm_irq_routing __user *urouting;
-		struct kvm_irq_routing_entry *entries;
-
-		r = -EFAULT;
-		if (copy_from_user(&routing, argp, sizeof(routing)))
-			goto out;
-		r = -EINVAL;
-		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
-			goto out;
-		if (routing.flags)
-			goto out;
-		r = -ENOMEM;
-		entries = vmalloc(routing.nr * sizeof(*entries));
-		if (!entries)
-			goto out;
-		r = -EFAULT;
-		urouting = argp;
-		if (copy_from_user(entries, urouting->entries,
-				   routing.nr * sizeof(*entries)))
-			goto out_free_irq_routing;
-		r = kvm_set_irq_routing(kvm, entries, routing.nr,
-					routing.flags);
-	out_free_irq_routing:
-		vfree(entries);
-		break;
-	}
-#endif /* KVM_CAP_IRQ_ROUTING */
-#ifdef __KVM_HAVE_MSIX
-	case KVM_ASSIGN_SET_MSIX_NR: {
-		struct kvm_assigned_msix_nr entry_nr;
-		r = -EFAULT;
-		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
-			goto out;
-		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
-		if (r)
-			goto out;
-		break;
-	}
-	case KVM_ASSIGN_SET_MSIX_ENTRY: {
-		struct kvm_assigned_msix_entry entry;
-		r = -EFAULT;
-		if (copy_from_user(&entry, argp, sizeof entry))
-			goto out;
-		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
-		if (r)
-			goto out;
-		break;
-	}
-#endif
 	case KVM_IRQFD: {
 		struct kvm_irqfd data;
 
@@ -2306,11 +1537,59 @@ static long kvm_vm_ioctl(struct file *filp,
 #endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
+		if (r == -ENOTTY)
+			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
 	}
 out:
 	return r;
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_kvm_dirty_log {
+	__u32 slot;
+	__u32 padding1;
+	union {
+		compat_uptr_t dirty_bitmap; /* one bit per page */
+		__u64 padding2;
+	};
+};
+
+static long kvm_vm_compat_ioctl(struct file *filp,
+			   unsigned int ioctl, unsigned long arg)
+{
+	struct kvm *kvm = filp->private_data;
+	int r;
+
+	if (kvm->mm != current->mm)
+		return -EIO;
+	switch (ioctl) {
+	case KVM_GET_DIRTY_LOG: {
+		struct compat_kvm_dirty_log compat_log;
+		struct kvm_dirty_log log;
+
+		r = -EFAULT;
+		if (copy_from_user(&compat_log, (void __user *)arg,
+				   sizeof(compat_log)))
+			goto out;
+		log.slot = compat_log.slot;
+		log.padding1 = compat_log.padding1;
+		log.padding2 = compat_log.padding2;
+		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
+		if (r)
+			goto out;
+		break;
+	}
+	default:
+		r = kvm_vm_ioctl(filp, ioctl, arg);
+	}
+
+out:
+	return r;
+}
+#endif
+
 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct page *page[1];
@@ -2345,7 +1624,9 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
 static struct file_operations kvm_vm_fops = {
 	.release        = kvm_vm_release,
 	.unlocked_ioctl = kvm_vm_ioctl,
-	.compat_ioctl   = kvm_vm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = kvm_vm_compat_ioctl,
+#endif
 	.mmap           = kvm_vm_mmap,
 };
 
@@ -2373,6 +1654,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
 	case KVM_CAP_SET_BOOT_CPU_ID:
 #endif
+	case KVM_CAP_INTERNAL_ERROR_DATA:
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
@@ -2443,11 +1725,21 @@ static struct miscdevice kvm_dev = {
 static void hardware_enable(void *junk)
 {
 	int cpu = raw_smp_processor_id();
+	int r;
 
 	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
 		return;
+
 	cpumask_set_cpu(cpu, cpus_hardware_enabled);
-	kvm_arch_hardware_enable(NULL);
+
+	r = kvm_arch_hardware_enable(NULL);
+
+	if (r) {
+		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
+		atomic_inc(&hardware_enable_failed);
+		printk(KERN_INFO "kvm: enabling virtualization on "
+				 "CPU%d failed\n", cpu);
+	}
 }
 
 static void hardware_disable(void *junk)
@@ -2460,11 +1752,52 @@ static void hardware_disable(void *junk)
 	kvm_arch_hardware_disable(NULL);
 }
 
+static void hardware_disable_all_nolock(void)
+{
+	BUG_ON(!kvm_usage_count);
+
+	kvm_usage_count--;
+	if (!kvm_usage_count)
+		on_each_cpu(hardware_disable, NULL, 1);
+}
+
+static void hardware_disable_all(void)
+{
+	spin_lock(&kvm_lock);
+	hardware_disable_all_nolock();
+	spin_unlock(&kvm_lock);
+}
+
+static int hardware_enable_all(void)
+{
+	int r = 0;
+
+	spin_lock(&kvm_lock);
+
+	kvm_usage_count++;
+	if (kvm_usage_count == 1) {
+		atomic_set(&hardware_enable_failed, 0);
+		on_each_cpu(hardware_enable, NULL, 1);
+
+		if (atomic_read(&hardware_enable_failed)) {
+			hardware_disable_all_nolock();
+			r = -EBUSY;
+		}
+	}
+
+	spin_unlock(&kvm_lock);
+
+	return r;
+}
+
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 			   void *v)
 {
 	int cpu = (long)v;
 
+	if (!kvm_usage_count)
+		return NOTIFY_OK;
+
 	val &= ~CPU_TASKS_FROZEN;
 	switch (val) {
 	case CPU_DYING:
@@ -2667,13 +2000,15 @@ static void kvm_exit_debug(void)
 
 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 {
-	hardware_disable(NULL);
+	if (kvm_usage_count)
+		hardware_disable(NULL);
 	return 0;
 }
 
 static int kvm_resume(struct sys_device *dev)
 {
-	hardware_enable(NULL);
+	if (kvm_usage_count)
+		hardware_enable(NULL);
 	return 0;
 }
 
@@ -2748,7 +2083,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 		goto out_free_1;
 	}
 
-	on_each_cpu(hardware_enable, NULL, 1);
 	r = register_cpu_notifier(&kvm_cpu_notifier);
 	if (r)
 		goto out_free_2;
@@ -2798,7 +2132,6 @@ out_free_3:
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
 out_free_2:
-	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
 	kvm_arch_hardware_unsetup();
 out_free_0a: