Diffstat (limited to 'virt')

-rw-r--r--  virt/kvm/assigned-dev.c   818
-rw-r--r--  virt/kvm/eventfd.c          2
-rw-r--r--  virt/kvm/ioapic.c          80
-rw-r--r--  virt/kvm/ioapic.h           5
-rw-r--r--  virt/kvm/irq_comm.c       231
-rw-r--r--  virt/kvm/kvm_main.c       961

6 files changed, 1167 insertions, 930 deletions
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
new file mode 100644
index 00000000000..fd9c097b760
--- /dev/null
+++ b/virt/kvm/assigned-dev.c
@@ -0,0 +1,818 @@
1 | /* | ||
2 | * Kernel-based Virtual Machine - device assignment support | ||
3 | * | ||
4 | * Copyright (C) 2006-9 Red Hat, Inc | ||
5 | * | ||
6 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
7 | * the COPYING file in the top-level directory. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/kvm_host.h> | ||
12 | #include <linux/kvm.h> | ||
13 | #include <linux/uaccess.h> | ||
14 | #include <linux/vmalloc.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include "irq.h" | ||
20 | |||
21 | static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, | ||
22 | int assigned_dev_id) | ||
23 | { | ||
24 | struct list_head *ptr; | ||
25 | struct kvm_assigned_dev_kernel *match; | ||
26 | |||
27 | list_for_each(ptr, head) { | ||
28 | match = list_entry(ptr, struct kvm_assigned_dev_kernel, list); | ||
29 | if (match->assigned_dev_id == assigned_dev_id) | ||
30 | return match; | ||
31 | } | ||
32 | return NULL; | ||
33 | } | ||
34 | |||
35 | static int find_index_from_host_irq(struct kvm_assigned_dev_kernel | ||
36 | *assigned_dev, int irq) | ||
37 | { | ||
38 | int i, index; | ||
39 | struct msix_entry *host_msix_entries; | ||
40 | |||
41 | host_msix_entries = assigned_dev->host_msix_entries; | ||
42 | |||
43 | index = -1; | ||
44 | for (i = 0; i < assigned_dev->entries_nr; i++) | ||
45 | if (irq == host_msix_entries[i].vector) { | ||
46 | index = i; | ||
47 | break; | ||
48 | } | ||
49 | if (index < 0) { | ||
50 | printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n"); | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | return index; | ||
55 | } | ||
56 | |||
57 | static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work) | ||
58 | { | ||
59 | struct kvm_assigned_dev_kernel *assigned_dev; | ||
60 | struct kvm *kvm; | ||
61 | int i; | ||
62 | |||
63 | assigned_dev = container_of(work, struct kvm_assigned_dev_kernel, | ||
64 | interrupt_work); | ||
65 | kvm = assigned_dev->kvm; | ||
66 | |||
67 | spin_lock_irq(&assigned_dev->assigned_dev_lock); | ||
68 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | ||
69 | struct kvm_guest_msix_entry *guest_entries = | ||
70 | assigned_dev->guest_msix_entries; | ||
71 | for (i = 0; i < assigned_dev->entries_nr; i++) { | ||
72 | if (!(guest_entries[i].flags & | ||
73 | KVM_ASSIGNED_MSIX_PENDING)) | ||
74 | continue; | ||
75 | guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING; | ||
76 | kvm_set_irq(assigned_dev->kvm, | ||
77 | assigned_dev->irq_source_id, | ||
78 | guest_entries[i].vector, 1); | ||
79 | } | ||
80 | } else | ||
81 | kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, | ||
82 | assigned_dev->guest_irq, 1); | ||
83 | |||
84 | spin_unlock_irq(&assigned_dev->assigned_dev_lock); | ||
85 | } | ||
86 | |||
87 | static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | struct kvm_assigned_dev_kernel *assigned_dev = | ||
91 | (struct kvm_assigned_dev_kernel *) dev_id; | ||
92 | |||
93 | spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags); | ||
94 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | ||
95 | int index = find_index_from_host_irq(assigned_dev, irq); | ||
96 | if (index < 0) | ||
97 | goto out; | ||
98 | assigned_dev->guest_msix_entries[index].flags |= | ||
99 | KVM_ASSIGNED_MSIX_PENDING; | ||
100 | } | ||
101 | |||
102 | schedule_work(&assigned_dev->interrupt_work); | ||
103 | |||
104 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) { | ||
105 | disable_irq_nosync(irq); | ||
106 | assigned_dev->host_irq_disabled = true; | ||
107 | } | ||
108 | |||
109 | out: | ||
110 | spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags); | ||
111 | return IRQ_HANDLED; | ||
112 | } | ||
113 | |||
114 | /* Ack the irq line for an assigned device */ | ||
115 | static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) | ||
116 | { | ||
117 | struct kvm_assigned_dev_kernel *dev; | ||
118 | unsigned long flags; | ||
119 | |||
120 | if (kian->gsi == -1) | ||
121 | return; | ||
122 | |||
123 | dev = container_of(kian, struct kvm_assigned_dev_kernel, | ||
124 | ack_notifier); | ||
125 | |||
126 | kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0); | ||
127 | |||
128 | /* The guest irq may be shared so this ack may be | ||
129 | * from another device. | ||
130 | */ | ||
131 | spin_lock_irqsave(&dev->assigned_dev_lock, flags); | ||
132 | if (dev->host_irq_disabled) { | ||
133 | enable_irq(dev->host_irq); | ||
134 | dev->host_irq_disabled = false; | ||
135 | } | ||
136 | spin_unlock_irqrestore(&dev->assigned_dev_lock, flags); | ||
137 | } | ||
138 | |||
139 | static void deassign_guest_irq(struct kvm *kvm, | ||
140 | struct kvm_assigned_dev_kernel *assigned_dev) | ||
141 | { | ||
142 | kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier); | ||
143 | assigned_dev->ack_notifier.gsi = -1; | ||
144 | |||
145 | if (assigned_dev->irq_source_id != -1) | ||
146 | kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); | ||
147 | assigned_dev->irq_source_id = -1; | ||
148 | assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK); | ||
149 | } | ||
150 | |||
151 | /* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */ | ||
152 | static void deassign_host_irq(struct kvm *kvm, | ||
153 | struct kvm_assigned_dev_kernel *assigned_dev) | ||
154 | { | ||
155 | /* | ||
156 | * In kvm_free_device_irq, cancel_work_sync() returns true if either: | ||
157 | * 1. the work was scheduled and has been cancelled, or | ||
158 | * 2. the work callback has already been executed. | ||
159 | * | ||
160 | * The first case ensures that the irq is disabled and no more events | ||
161 | * can arrive. In the second case, however, the irq may still be | ||
162 | * enabled (e.g. for MSI), so we disable it here to prevent further events. | ||
163 | * | ||
164 | * Note that this may result in a nested disable if the interrupt type is | ||
165 | * INTx, but that is fine since we are about to free it. | ||
166 | * | ||
167 | * If this function is part of VM destruction, please ensure that the kvm | ||
168 | * state is still valid at this point, as we may also have to wait for | ||
169 | * interrupt_work to finish. | ||
170 | */ | ||
171 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | ||
172 | int i; | ||
173 | for (i = 0; i < assigned_dev->entries_nr; i++) | ||
174 | disable_irq_nosync(assigned_dev-> | ||
175 | host_msix_entries[i].vector); | ||
176 | |||
177 | cancel_work_sync(&assigned_dev->interrupt_work); | ||
178 | |||
179 | for (i = 0; i < assigned_dev->entries_nr; i++) | ||
180 | free_irq(assigned_dev->host_msix_entries[i].vector, | ||
181 | (void *)assigned_dev); | ||
182 | |||
183 | assigned_dev->entries_nr = 0; | ||
184 | kfree(assigned_dev->host_msix_entries); | ||
185 | kfree(assigned_dev->guest_msix_entries); | ||
186 | pci_disable_msix(assigned_dev->dev); | ||
187 | } else { | ||
188 | /* Deal with MSI and INTx */ | ||
189 | disable_irq_nosync(assigned_dev->host_irq); | ||
190 | cancel_work_sync(&assigned_dev->interrupt_work); | ||
191 | |||
192 | free_irq(assigned_dev->host_irq, (void *)assigned_dev); | ||
193 | |||
194 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI) | ||
195 | pci_disable_msi(assigned_dev->dev); | ||
196 | } | ||
197 | |||
198 | assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK); | ||
199 | } | ||
200 | |||
201 | static int kvm_deassign_irq(struct kvm *kvm, | ||
202 | struct kvm_assigned_dev_kernel *assigned_dev, | ||
203 | unsigned long irq_requested_type) | ||
204 | { | ||
205 | unsigned long guest_irq_type, host_irq_type; | ||
206 | |||
207 | if (!irqchip_in_kernel(kvm)) | ||
208 | return -EINVAL; | ||
209 | /* no irq assignment to deassign */ | ||
210 | if (!assigned_dev->irq_requested_type) | ||
211 | return -ENXIO; | ||
212 | |||
213 | host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK; | ||
214 | guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK; | ||
215 | |||
216 | if (host_irq_type) | ||
217 | deassign_host_irq(kvm, assigned_dev); | ||
218 | if (guest_irq_type) | ||
219 | deassign_guest_irq(kvm, assigned_dev); | ||
220 | |||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | static void kvm_free_assigned_irq(struct kvm *kvm, | ||
225 | struct kvm_assigned_dev_kernel *assigned_dev) | ||
226 | { | ||
227 | kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type); | ||
228 | } | ||
229 | |||
230 | static void kvm_free_assigned_device(struct kvm *kvm, | ||
231 | struct kvm_assigned_dev_kernel | ||
232 | *assigned_dev) | ||
233 | { | ||
234 | kvm_free_assigned_irq(kvm, assigned_dev); | ||
235 | |||
236 | pci_reset_function(assigned_dev->dev); | ||
237 | |||
238 | pci_release_regions(assigned_dev->dev); | ||
239 | pci_disable_device(assigned_dev->dev); | ||
240 | pci_dev_put(assigned_dev->dev); | ||
241 | |||
242 | list_del(&assigned_dev->list); | ||
243 | kfree(assigned_dev); | ||
244 | } | ||
245 | |||
246 | void kvm_free_all_assigned_devices(struct kvm *kvm) | ||
247 | { | ||
248 | struct list_head *ptr, *ptr2; | ||
249 | struct kvm_assigned_dev_kernel *assigned_dev; | ||
250 | |||
251 | list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) { | ||
252 | assigned_dev = list_entry(ptr, | ||
253 | struct kvm_assigned_dev_kernel, | ||
254 | list); | ||
255 | |||
256 | kvm_free_assigned_device(kvm, assigned_dev); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | static int assigned_device_enable_host_intx(struct kvm *kvm, | ||
261 | struct kvm_assigned_dev_kernel *dev) | ||
262 | { | ||
263 | dev->host_irq = dev->dev->irq; | ||
264 | /* Even though this is PCI, we don't want to use shared | ||
265 | * interrupts. Sharing host devices with guest-assigned devices | ||
266 | * on the same interrupt line is not a happy situation: there | ||
267 | * are going to be long delays in accepting, acking, etc. | ||
268 | */ | ||
269 | if (request_irq(dev->host_irq, kvm_assigned_dev_intr, | ||
270 | 0, "kvm_assigned_intx_device", (void *)dev)) | ||
271 | return -EIO; | ||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | #ifdef __KVM_HAVE_MSI | ||
276 | static int assigned_device_enable_host_msi(struct kvm *kvm, | ||
277 | struct kvm_assigned_dev_kernel *dev) | ||
278 | { | ||
279 | int r; | ||
280 | |||
281 | if (!dev->dev->msi_enabled) { | ||
282 | r = pci_enable_msi(dev->dev); | ||
283 | if (r) | ||
284 | return r; | ||
285 | } | ||
286 | |||
287 | dev->host_irq = dev->dev->irq; | ||
288 | if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0, | ||
289 | "kvm_assigned_msi_device", (void *)dev)) { | ||
290 | pci_disable_msi(dev->dev); | ||
291 | return -EIO; | ||
292 | } | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | #endif | ||
297 | |||
298 | #ifdef __KVM_HAVE_MSIX | ||
299 | static int assigned_device_enable_host_msix(struct kvm *kvm, | ||
300 | struct kvm_assigned_dev_kernel *dev) | ||
301 | { | ||
302 | int i, r = -EINVAL; | ||
303 | |||
304 | /* host_msix_entries and guest_msix_entries should have been | ||
305 | * initialized */ | ||
306 | if (dev->entries_nr == 0) | ||
307 | return r; | ||
308 | |||
309 | r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr); | ||
310 | if (r) | ||
311 | return r; | ||
312 | |||
313 | for (i = 0; i < dev->entries_nr; i++) { | ||
314 | r = request_irq(dev->host_msix_entries[i].vector, | ||
315 | kvm_assigned_dev_intr, 0, | ||
316 | "kvm_assigned_msix_device", | ||
317 | (void *)dev); | ||
318 | /* FIXME: free requested_irq's on failure */ | ||
319 | if (r) | ||
320 | return r; | ||
321 | } | ||
322 | |||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | #endif | ||
327 | |||
328 | static int assigned_device_enable_guest_intx(struct kvm *kvm, | ||
329 | struct kvm_assigned_dev_kernel *dev, | ||
330 | struct kvm_assigned_irq *irq) | ||
331 | { | ||
332 | dev->guest_irq = irq->guest_irq; | ||
333 | dev->ack_notifier.gsi = irq->guest_irq; | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | #ifdef __KVM_HAVE_MSI | ||
338 | static int assigned_device_enable_guest_msi(struct kvm *kvm, | ||
339 | struct kvm_assigned_dev_kernel *dev, | ||
340 | struct kvm_assigned_irq *irq) | ||
341 | { | ||
342 | dev->guest_irq = irq->guest_irq; | ||
343 | dev->ack_notifier.gsi = -1; | ||
344 | dev->host_irq_disabled = false; | ||
345 | return 0; | ||
346 | } | ||
347 | #endif | ||
348 | |||
349 | #ifdef __KVM_HAVE_MSIX | ||
350 | static int assigned_device_enable_guest_msix(struct kvm *kvm, | ||
351 | struct kvm_assigned_dev_kernel *dev, | ||
352 | struct kvm_assigned_irq *irq) | ||
353 | { | ||
354 | dev->guest_irq = irq->guest_irq; | ||
355 | dev->ack_notifier.gsi = -1; | ||
356 | dev->host_irq_disabled = false; | ||
357 | return 0; | ||
358 | } | ||
359 | #endif | ||
360 | |||
361 | static int assign_host_irq(struct kvm *kvm, | ||
362 | struct kvm_assigned_dev_kernel *dev, | ||
363 | __u32 host_irq_type) | ||
364 | { | ||
365 | int r = -EEXIST; | ||
366 | |||
367 | if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK) | ||
368 | return r; | ||
369 | |||
370 | switch (host_irq_type) { | ||
371 | case KVM_DEV_IRQ_HOST_INTX: | ||
372 | r = assigned_device_enable_host_intx(kvm, dev); | ||
373 | break; | ||
374 | #ifdef __KVM_HAVE_MSI | ||
375 | case KVM_DEV_IRQ_HOST_MSI: | ||
376 | r = assigned_device_enable_host_msi(kvm, dev); | ||
377 | break; | ||
378 | #endif | ||
379 | #ifdef __KVM_HAVE_MSIX | ||
380 | case KVM_DEV_IRQ_HOST_MSIX: | ||
381 | r = assigned_device_enable_host_msix(kvm, dev); | ||
382 | break; | ||
383 | #endif | ||
384 | default: | ||
385 | r = -EINVAL; | ||
386 | } | ||
387 | |||
388 | if (!r) | ||
389 | dev->irq_requested_type |= host_irq_type; | ||
390 | |||
391 | return r; | ||
392 | } | ||
393 | |||
394 | static int assign_guest_irq(struct kvm *kvm, | ||
395 | struct kvm_assigned_dev_kernel *dev, | ||
396 | struct kvm_assigned_irq *irq, | ||
397 | unsigned long guest_irq_type) | ||
398 | { | ||
399 | int id; | ||
400 | int r = -EEXIST; | ||
401 | |||
402 | if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK) | ||
403 | return r; | ||
404 | |||
405 | id = kvm_request_irq_source_id(kvm); | ||
406 | if (id < 0) | ||
407 | return id; | ||
408 | |||
409 | dev->irq_source_id = id; | ||
410 | |||
411 | switch (guest_irq_type) { | ||
412 | case KVM_DEV_IRQ_GUEST_INTX: | ||
413 | r = assigned_device_enable_guest_intx(kvm, dev, irq); | ||
414 | break; | ||
415 | #ifdef __KVM_HAVE_MSI | ||
416 | case KVM_DEV_IRQ_GUEST_MSI: | ||
417 | r = assigned_device_enable_guest_msi(kvm, dev, irq); | ||
418 | break; | ||
419 | #endif | ||
420 | #ifdef __KVM_HAVE_MSIX | ||
421 | case KVM_DEV_IRQ_GUEST_MSIX: | ||
422 | r = assigned_device_enable_guest_msix(kvm, dev, irq); | ||
423 | break; | ||
424 | #endif | ||
425 | default: | ||
426 | r = -EINVAL; | ||
427 | } | ||
428 | |||
429 | if (!r) { | ||
430 | dev->irq_requested_type |= guest_irq_type; | ||
431 | kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier); | ||
432 | } else | ||
433 | kvm_free_irq_source_id(kvm, dev->irq_source_id); | ||
434 | |||
435 | return r; | ||
436 | } | ||
437 | |||
438 | /* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */ | ||
439 | static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, | ||
440 | struct kvm_assigned_irq *assigned_irq) | ||
441 | { | ||
442 | int r = -EINVAL; | ||
443 | struct kvm_assigned_dev_kernel *match; | ||
444 | unsigned long host_irq_type, guest_irq_type; | ||
445 | |||
446 | if (!capable(CAP_SYS_RAWIO)) | ||
447 | return -EPERM; | ||
448 | |||
449 | if (!irqchip_in_kernel(kvm)) | ||
450 | return r; | ||
451 | |||
452 | mutex_lock(&kvm->lock); | ||
453 | r = -ENODEV; | ||
454 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
455 | assigned_irq->assigned_dev_id); | ||
456 | if (!match) | ||
457 | goto out; | ||
458 | |||
459 | host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK); | ||
460 | guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK); | ||
461 | |||
462 | r = -EINVAL; | ||
463 | /* can only assign one type at a time */ | ||
464 | if (hweight_long(host_irq_type) > 1) | ||
465 | goto out; | ||
466 | if (hweight_long(guest_irq_type) > 1) | ||
467 | goto out; | ||
468 | if (host_irq_type == 0 && guest_irq_type == 0) | ||
469 | goto out; | ||
470 | |||
471 | r = 0; | ||
472 | if (host_irq_type) | ||
473 | r = assign_host_irq(kvm, match, host_irq_type); | ||
474 | if (r) | ||
475 | goto out; | ||
476 | |||
477 | if (guest_irq_type) | ||
478 | r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type); | ||
479 | out: | ||
480 | mutex_unlock(&kvm->lock); | ||
481 | return r; | ||
482 | } | ||
483 | |||
484 | static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, | ||
485 | struct kvm_assigned_irq | ||
486 | *assigned_irq) | ||
487 | { | ||
488 | int r = -ENODEV; | ||
489 | struct kvm_assigned_dev_kernel *match; | ||
490 | |||
491 | mutex_lock(&kvm->lock); | ||
492 | |||
493 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
494 | assigned_irq->assigned_dev_id); | ||
495 | if (!match) | ||
496 | goto out; | ||
497 | |||
498 | r = kvm_deassign_irq(kvm, match, assigned_irq->flags); | ||
499 | out: | ||
500 | mutex_unlock(&kvm->lock); | ||
501 | return r; | ||
502 | } | ||
503 | |||
504 | static int kvm_vm_ioctl_assign_device(struct kvm *kvm, | ||
505 | struct kvm_assigned_pci_dev *assigned_dev) | ||
506 | { | ||
507 | int r = 0; | ||
508 | struct kvm_assigned_dev_kernel *match; | ||
509 | struct pci_dev *dev; | ||
510 | |||
511 | down_read(&kvm->slots_lock); | ||
512 | mutex_lock(&kvm->lock); | ||
513 | |||
514 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
515 | assigned_dev->assigned_dev_id); | ||
516 | if (match) { | ||
517 | /* device already assigned */ | ||
518 | r = -EEXIST; | ||
519 | goto out; | ||
520 | } | ||
521 | |||
522 | match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL); | ||
523 | if (match == NULL) { | ||
524 | printk(KERN_INFO "%s: Couldn't allocate memory\n", | ||
525 | __func__); | ||
526 | r = -ENOMEM; | ||
527 | goto out; | ||
528 | } | ||
529 | dev = pci_get_bus_and_slot(assigned_dev->busnr, | ||
530 | assigned_dev->devfn); | ||
531 | if (!dev) { | ||
532 | printk(KERN_INFO "%s: host device not found\n", __func__); | ||
533 | r = -EINVAL; | ||
534 | goto out_free; | ||
535 | } | ||
536 | if (pci_enable_device(dev)) { | ||
537 | printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); | ||
538 | r = -EBUSY; | ||
539 | goto out_put; | ||
540 | } | ||
541 | r = pci_request_regions(dev, "kvm_assigned_device"); | ||
542 | if (r) { | ||
543 | printk(KERN_INFO "%s: Could not get access to device regions\n", | ||
544 | __func__); | ||
545 | goto out_disable; | ||
546 | } | ||
547 | |||
548 | pci_reset_function(dev); | ||
549 | |||
550 | match->assigned_dev_id = assigned_dev->assigned_dev_id; | ||
551 | match->host_busnr = assigned_dev->busnr; | ||
552 | match->host_devfn = assigned_dev->devfn; | ||
553 | match->flags = assigned_dev->flags; | ||
554 | match->dev = dev; | ||
555 | spin_lock_init(&match->assigned_dev_lock); | ||
556 | match->irq_source_id = -1; | ||
557 | match->kvm = kvm; | ||
558 | match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq; | ||
559 | INIT_WORK(&match->interrupt_work, | ||
560 | kvm_assigned_dev_interrupt_work_handler); | ||
561 | |||
562 | list_add(&match->list, &kvm->arch.assigned_dev_head); | ||
563 | |||
564 | if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { | ||
565 | if (!kvm->arch.iommu_domain) { | ||
566 | r = kvm_iommu_map_guest(kvm); | ||
567 | if (r) | ||
568 | goto out_list_del; | ||
569 | } | ||
570 | r = kvm_assign_device(kvm, match); | ||
571 | if (r) | ||
572 | goto out_list_del; | ||
573 | } | ||
574 | |||
575 | out: | ||
576 | mutex_unlock(&kvm->lock); | ||
577 | up_read(&kvm->slots_lock); | ||
578 | return r; | ||
579 | out_list_del: | ||
580 | list_del(&match->list); | ||
581 | pci_release_regions(dev); | ||
582 | out_disable: | ||
583 | pci_disable_device(dev); | ||
584 | out_put: | ||
585 | pci_dev_put(dev); | ||
586 | out_free: | ||
587 | kfree(match); | ||
588 | mutex_unlock(&kvm->lock); | ||
589 | up_read(&kvm->slots_lock); | ||
590 | return r; | ||
591 | } | ||
592 | |||
593 | static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, | ||
594 | struct kvm_assigned_pci_dev *assigned_dev) | ||
595 | { | ||
596 | int r = 0; | ||
597 | struct kvm_assigned_dev_kernel *match; | ||
598 | |||
599 | mutex_lock(&kvm->lock); | ||
600 | |||
601 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
602 | assigned_dev->assigned_dev_id); | ||
603 | if (!match) { | ||
604 | printk(KERN_INFO "%s: device hasn't been assigned before, " | ||
605 | "so cannot be deassigned\n", __func__); | ||
606 | r = -EINVAL; | ||
607 | goto out; | ||
608 | } | ||
609 | |||
610 | if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) | ||
611 | kvm_deassign_device(kvm, match); | ||
612 | |||
613 | kvm_free_assigned_device(kvm, match); | ||
614 | |||
615 | out: | ||
616 | mutex_unlock(&kvm->lock); | ||
617 | return r; | ||
618 | } | ||
619 | |||
620 | |||
621 | #ifdef __KVM_HAVE_MSIX | ||
622 | static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm, | ||
623 | struct kvm_assigned_msix_nr *entry_nr) | ||
624 | { | ||
625 | int r = 0; | ||
626 | struct kvm_assigned_dev_kernel *adev; | ||
627 | |||
628 | mutex_lock(&kvm->lock); | ||
629 | |||
630 | adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
631 | entry_nr->assigned_dev_id); | ||
632 | if (!adev) { | ||
633 | r = -EINVAL; | ||
634 | goto msix_nr_out; | ||
635 | } | ||
636 | |||
637 | if (adev->entries_nr == 0) { | ||
638 | adev->entries_nr = entry_nr->entry_nr; | ||
639 | if (adev->entries_nr == 0 || | ||
640 | adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) { | ||
641 | r = -EINVAL; | ||
642 | goto msix_nr_out; | ||
643 | } | ||
644 | |||
645 | adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) * | ||
646 | entry_nr->entry_nr, | ||
647 | GFP_KERNEL); | ||
648 | if (!adev->host_msix_entries) { | ||
649 | r = -ENOMEM; | ||
650 | goto msix_nr_out; | ||
651 | } | ||
652 | adev->guest_msix_entries = kzalloc( | ||
653 | sizeof(struct kvm_guest_msix_entry) * | ||
654 | entry_nr->entry_nr, GFP_KERNEL); | ||
655 | if (!adev->guest_msix_entries) { | ||
656 | kfree(adev->host_msix_entries); | ||
657 | r = -ENOMEM; | ||
658 | goto msix_nr_out; | ||
659 | } | ||
660 | } else /* Not allowed to set the MSI-X entry count twice */ | ||
661 | r = -EINVAL; | ||
662 | msix_nr_out: | ||
663 | mutex_unlock(&kvm->lock); | ||
664 | return r; | ||
665 | } | ||
666 | |||
667 | static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm, | ||
668 | struct kvm_assigned_msix_entry *entry) | ||
669 | { | ||
670 | int r = 0, i; | ||
671 | struct kvm_assigned_dev_kernel *adev; | ||
672 | |||
673 | mutex_lock(&kvm->lock); | ||
674 | |||
675 | adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
676 | entry->assigned_dev_id); | ||
677 | |||
678 | if (!adev) { | ||
679 | r = -EINVAL; | ||
680 | goto msix_entry_out; | ||
681 | } | ||
682 | |||
683 | for (i = 0; i < adev->entries_nr; i++) | ||
684 | if (adev->guest_msix_entries[i].vector == 0 || | ||
685 | adev->guest_msix_entries[i].entry == entry->entry) { | ||
686 | adev->guest_msix_entries[i].entry = entry->entry; | ||
687 | adev->guest_msix_entries[i].vector = entry->gsi; | ||
688 | adev->host_msix_entries[i].entry = entry->entry; | ||
689 | break; | ||
690 | } | ||
691 | if (i == adev->entries_nr) { | ||
692 | r = -ENOSPC; | ||
693 | goto msix_entry_out; | ||
694 | } | ||
695 | |||
696 | msix_entry_out: | ||
697 | mutex_unlock(&kvm->lock); | ||
698 | |||
699 | return r; | ||
700 | } | ||
701 | #endif | ||
702 | |||
703 | long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, | ||
704 | unsigned long arg) | ||
705 | { | ||
706 | void __user *argp = (void __user *)arg; | ||
707 | int r = -ENOTTY; | ||
708 | |||
709 | switch (ioctl) { | ||
710 | case KVM_ASSIGN_PCI_DEVICE: { | ||
711 | struct kvm_assigned_pci_dev assigned_dev; | ||
712 | |||
713 | r = -EFAULT; | ||
714 | if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) | ||
715 | goto out; | ||
716 | r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev); | ||
717 | if (r) | ||
718 | goto out; | ||
719 | break; | ||
720 | } | ||
721 | case KVM_ASSIGN_IRQ: { | ||
722 | r = -EOPNOTSUPP; | ||
723 | break; | ||
724 | } | ||
725 | #ifdef KVM_CAP_ASSIGN_DEV_IRQ | ||
726 | case KVM_ASSIGN_DEV_IRQ: { | ||
727 | struct kvm_assigned_irq assigned_irq; | ||
728 | |||
729 | r = -EFAULT; | ||
730 | if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) | ||
731 | goto out; | ||
732 | r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq); | ||
733 | if (r) | ||
734 | goto out; | ||
735 | break; | ||
736 | } | ||
737 | case KVM_DEASSIGN_DEV_IRQ: { | ||
738 | struct kvm_assigned_irq assigned_irq; | ||
739 | |||
740 | r = -EFAULT; | ||
741 | if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) | ||
742 | goto out; | ||
743 | r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq); | ||
744 | if (r) | ||
745 | goto out; | ||
746 | break; | ||
747 | } | ||
748 | #endif | ||
749 | #ifdef KVM_CAP_DEVICE_DEASSIGNMENT | ||
750 | case KVM_DEASSIGN_PCI_DEVICE: { | ||
751 | struct kvm_assigned_pci_dev assigned_dev; | ||
752 | |||
753 | r = -EFAULT; | ||
754 | if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) | ||
755 | goto out; | ||
756 | r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev); | ||
757 | if (r) | ||
758 | goto out; | ||
759 | break; | ||
760 | } | ||
761 | #endif | ||
762 | #ifdef KVM_CAP_IRQ_ROUTING | ||
763 | case KVM_SET_GSI_ROUTING: { | ||
764 | struct kvm_irq_routing routing; | ||
765 | struct kvm_irq_routing __user *urouting; | ||
766 | struct kvm_irq_routing_entry *entries; | ||
767 | |||
768 | r = -EFAULT; | ||
769 | if (copy_from_user(&routing, argp, sizeof(routing))) | ||
770 | goto out; | ||
771 | r = -EINVAL; | ||
772 | if (routing.nr >= KVM_MAX_IRQ_ROUTES) | ||
773 | goto out; | ||
774 | if (routing.flags) | ||
775 | goto out; | ||
776 | r = -ENOMEM; | ||
777 | entries = vmalloc(routing.nr * sizeof(*entries)); | ||
778 | if (!entries) | ||
779 | goto out; | ||
780 | r = -EFAULT; | ||
781 | urouting = argp; | ||
782 | if (copy_from_user(entries, urouting->entries, | ||
783 | routing.nr * sizeof(*entries))) | ||
784 | goto out_free_irq_routing; | ||
785 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | ||
786 | routing.flags); | ||
787 | out_free_irq_routing: | ||
788 | vfree(entries); | ||
789 | break; | ||
790 | } | ||
791 | #endif /* KVM_CAP_IRQ_ROUTING */ | ||
792 | #ifdef __KVM_HAVE_MSIX | ||
793 | case KVM_ASSIGN_SET_MSIX_NR: { | ||
794 | struct kvm_assigned_msix_nr entry_nr; | ||
795 | r = -EFAULT; | ||
796 | if (copy_from_user(&entry_nr, argp, sizeof entry_nr)) | ||
797 | goto out; | ||
798 | r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr); | ||
799 | if (r) | ||
800 | goto out; | ||
801 | break; | ||
802 | } | ||
803 | case KVM_ASSIGN_SET_MSIX_ENTRY: { | ||
804 | struct kvm_assigned_msix_entry entry; | ||
805 | r = -EFAULT; | ||
806 | if (copy_from_user(&entry, argp, sizeof entry)) | ||
807 | goto out; | ||
808 | r = kvm_vm_ioctl_set_msix_entry(kvm, &entry); | ||
809 | if (r) | ||
810 | goto out; | ||
811 | break; | ||
812 | } | ||
813 | #endif | ||
814 | } | ||
815 | out: | ||
816 | return r; | ||
817 | } | ||
818 | |||
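Note that the interrupt path in assigned-dev.c above never injects from hard-irq context: kvm_assigned_dev_intr() only records which MSI-X vector fired (or masks the INTx line) and defers the actual kvm_set_irq() call to interrupt_work. Below is a minimal userspace analogue of that handoff; pthread primitives stand in for the irq-safe spinlock and the workqueue, and every name in it is illustrative rather than a kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_VECTORS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static bool pending[NR_VECTORS];   /* KVM_ASSIGNED_MSIX_PENDING analogue */
static bool stop;

static void fake_hard_irq(int vector)
{
	/* "interrupt context": record the vector, wake the worker */
	pthread_mutex_lock(&lock);
	pending[vector] = true;
	pthread_cond_signal(&kick);    /* schedule_work() analogue */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	/* interrupt_work analogue: drain pending vectors and "inject" */
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		for (int i = 0; i < NR_VECTORS; i++) {
			if (!pending[i])
				continue;
			pending[i] = false;
			printf("inject guest irq for vector %d\n", i);
		}
		if (stop)
			break;
		pthread_cond_wait(&kick, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	fake_hard_irq(1);
	fake_hard_irq(3);
	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}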
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index bb4ebd89b9f..30f70fd511c 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -61,10 +61,8 @@ irqfd_inject(struct work_struct *work)
61 | struct _irqfd *irqfd = container_of(work, struct _irqfd, inject); | 61 | struct _irqfd *irqfd = container_of(work, struct _irqfd, inject); |
62 | struct kvm *kvm = irqfd->kvm; | 62 | struct kvm *kvm = irqfd->kvm; |
63 | 63 | ||
64 | mutex_lock(&kvm->irq_lock); | ||
65 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1); | 64 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1); |
66 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0); | 65 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0); |
67 | mutex_unlock(&kvm->irq_lock); | ||
68 | } | 66 | } |
69 | 67 | ||
70 | /* | 68 | /* |
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 9fe140bb38e..38a2d20b89d 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -182,6 +182,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
182 | union kvm_ioapic_redirect_entry entry; | 182 | union kvm_ioapic_redirect_entry entry; |
183 | int ret = 1; | 183 | int ret = 1; |
184 | 184 | ||
185 | mutex_lock(&ioapic->lock); | ||
185 | if (irq >= 0 && irq < IOAPIC_NUM_PINS) { | 186 | if (irq >= 0 && irq < IOAPIC_NUM_PINS) { |
186 | entry = ioapic->redirtbl[irq]; | 187 | entry = ioapic->redirtbl[irq]; |
187 | level ^= entry.fields.polarity; | 188 | level ^= entry.fields.polarity; |
@@ -198,34 +199,51 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) | |||
198 | } | 199 | } |
199 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); | 200 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); |
200 | } | 201 | } |
202 | mutex_unlock(&ioapic->lock); | ||
203 | |||
201 | return ret; | 204 | return ret; |
202 | } | 205 | } |
203 | 206 | ||
204 | static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int pin, | 207 | static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, |
205 | int trigger_mode) | 208 | int trigger_mode) |
206 | { | 209 | { |
207 | union kvm_ioapic_redirect_entry *ent; | 210 | int i; |
211 | |||
212 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { | ||
213 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; | ||
208 | 214 | ||
209 | ent = &ioapic->redirtbl[pin]; | 215 | if (ent->fields.vector != vector) |
216 | continue; | ||
210 | 217 | ||
211 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin); | 218 | /* |
219 | * We are dropping lock while calling ack notifiers because ack | ||
220 | * notifier callbacks for assigned devices call into IOAPIC | ||
221 | * recursively. Since remote_irr is cleared only after call | ||
222 | * to notifiers if the same vector will be delivered while lock | ||
223 | * is dropped it will be put into irr and will be delivered | ||
224 | * after ack notifier returns. | ||
225 | */ | ||
226 | mutex_unlock(&ioapic->lock); | ||
227 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); | ||
228 | mutex_lock(&ioapic->lock); | ||
229 | |||
230 | if (trigger_mode != IOAPIC_LEVEL_TRIG) | ||
231 | continue; | ||
212 | 232 | ||
213 | if (trigger_mode == IOAPIC_LEVEL_TRIG) { | ||
214 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); | 233 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); |
215 | ent->fields.remote_irr = 0; | 234 | ent->fields.remote_irr = 0; |
216 | if (!ent->fields.mask && (ioapic->irr & (1 << pin))) | 235 | if (!ent->fields.mask && (ioapic->irr & (1 << i))) |
217 | ioapic_service(ioapic, pin); | 236 | ioapic_service(ioapic, i); |
218 | } | 237 | } |
219 | } | 238 | } |
220 | 239 | ||
221 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) | 240 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) |
222 | { | 241 | { |
223 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 242 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
224 | int i; | ||
225 | 243 | ||
226 | for (i = 0; i < IOAPIC_NUM_PINS; i++) | 244 | mutex_lock(&ioapic->lock); |
227 | if (ioapic->redirtbl[i].fields.vector == vector) | 245 | __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); |
228 | __kvm_ioapic_update_eoi(ioapic, i, trigger_mode); | 246 | mutex_unlock(&ioapic->lock); |
229 | } | 247 | } |
230 | 248 | ||
231 | static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) | 249 | static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) |
@@ -250,8 +268,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | |||
250 | ioapic_debug("addr %lx\n", (unsigned long)addr); | 268 | ioapic_debug("addr %lx\n", (unsigned long)addr); |
251 | ASSERT(!(addr & 0xf)); /* check alignment */ | 269 | ASSERT(!(addr & 0xf)); /* check alignment */ |
252 | 270 | ||
253 | mutex_lock(&ioapic->kvm->irq_lock); | ||
254 | addr &= 0xff; | 271 | addr &= 0xff; |
272 | mutex_lock(&ioapic->lock); | ||
255 | switch (addr) { | 273 | switch (addr) { |
256 | case IOAPIC_REG_SELECT: | 274 | case IOAPIC_REG_SELECT: |
257 | result = ioapic->ioregsel; | 275 | result = ioapic->ioregsel; |
@@ -265,6 +283,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | |||
265 | result = 0; | 283 | result = 0; |
266 | break; | 284 | break; |
267 | } | 285 | } |
286 | mutex_unlock(&ioapic->lock); | ||
287 | |||
268 | switch (len) { | 288 | switch (len) { |
269 | case 8: | 289 | case 8: |
270 | *(u64 *) val = result; | 290 | *(u64 *) val = result; |
@@ -277,7 +297,6 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | |||
277 | default: | 297 | default: |
278 | printk(KERN_WARNING "ioapic: wrong length %d\n", len); | 298 | printk(KERN_WARNING "ioapic: wrong length %d\n", len); |
279 | } | 299 | } |
280 | mutex_unlock(&ioapic->kvm->irq_lock); | ||
281 | return 0; | 300 | return 0; |
282 | } | 301 | } |
283 | 302 | ||
@@ -293,15 +312,15 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, | |||
293 | (void*)addr, len, val); | 312 | (void*)addr, len, val); |
294 | ASSERT(!(addr & 0xf)); /* check alignment */ | 313 | ASSERT(!(addr & 0xf)); /* check alignment */ |
295 | 314 | ||
296 | mutex_lock(&ioapic->kvm->irq_lock); | ||
297 | if (len == 4 || len == 8) | 315 | if (len == 4 || len == 8) |
298 | data = *(u32 *) val; | 316 | data = *(u32 *) val; |
299 | else { | 317 | else { |
300 | printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); | 318 | printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); |
301 | goto unlock; | 319 | return 0; |
302 | } | 320 | } |
303 | 321 | ||
304 | addr &= 0xff; | 322 | addr &= 0xff; |
323 | mutex_lock(&ioapic->lock); | ||
305 | switch (addr) { | 324 | switch (addr) { |
306 | case IOAPIC_REG_SELECT: | 325 | case IOAPIC_REG_SELECT: |
307 | ioapic->ioregsel = data; | 326 | ioapic->ioregsel = data; |
@@ -312,15 +331,14 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, | |||
312 | break; | 331 | break; |
313 | #ifdef CONFIG_IA64 | 332 | #ifdef CONFIG_IA64 |
314 | case IOAPIC_REG_EOI: | 333 | case IOAPIC_REG_EOI: |
315 | kvm_ioapic_update_eoi(ioapic->kvm, data, IOAPIC_LEVEL_TRIG); | 334 | __kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG); |
316 | break; | 335 | break; |
317 | #endif | 336 | #endif |
318 | 337 | ||
319 | default: | 338 | default: |
320 | break; | 339 | break; |
321 | } | 340 | } |
322 | unlock: | 341 | mutex_unlock(&ioapic->lock); |
323 | mutex_unlock(&ioapic->kvm->irq_lock); | ||
324 | return 0; | 342 | return 0; |
325 | } | 343 | } |
326 | 344 | ||
@@ -349,6 +367,7 @@ int kvm_ioapic_init(struct kvm *kvm) | |||
349 | ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); | 367 | ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); |
350 | if (!ioapic) | 368 | if (!ioapic) |
351 | return -ENOMEM; | 369 | return -ENOMEM; |
370 | mutex_init(&ioapic->lock); | ||
352 | kvm->arch.vioapic = ioapic; | 371 | kvm->arch.vioapic = ioapic; |
353 | kvm_ioapic_reset(ioapic); | 372 | kvm_ioapic_reset(ioapic); |
354 | kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); | 373 | kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); |
@@ -360,3 +379,26 @@ int kvm_ioapic_init(struct kvm *kvm) | |||
360 | return ret; | 379 | return ret; |
361 | } | 380 | } |
362 | 381 | ||
382 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | ||
383 | { | ||
384 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); | ||
385 | if (!ioapic) | ||
386 | return -EINVAL; | ||
387 | |||
388 | mutex_lock(&ioapic->lock); | ||
389 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); | ||
390 | mutex_unlock(&ioapic->lock); | ||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | ||
395 | { | ||
396 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); | ||
397 | if (!ioapic) | ||
398 | return -EINVAL; | ||
399 | |||
400 | mutex_lock(&ioapic->lock); | ||
401 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); | ||
402 | mutex_unlock(&ioapic->lock); | ||
403 | return 0; | ||
404 | } | ||
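The comment added to __kvm_ioapic_update_eoi() above is the crux of the new per-ioapic mutex: ack notifiers (such as the assigned-device one) call back into the IOAPIC, so the lock is dropped around the callback and remote_irr is only cleared after it is reacquired. A stripped-down, self-contained sketch of that pattern follows, with made-up types and a pthread mutex in place of the kernel mutex.

#include <pthread.h>
#include <stdio.h>

#define NUM_PINS 24

struct ioapic {
	pthread_mutex_t lock;
	int vector[NUM_PINS];
	int remote_irr[NUM_PINS];
};

/* Stand-in for kvm_notify_acked_irq(): it may re-enter the ioapic and
 * take ioapic->lock itself, which is why the caller drops it first. */
static void notify_acked(struct ioapic *io, int pin)
{
	pthread_mutex_lock(&io->lock);
	printf("ack notifier ran for pin %d\n", pin);
	pthread_mutex_unlock(&io->lock);
}

static void update_eoi(struct ioapic *io, int vector)
{
	pthread_mutex_lock(&io->lock);
	for (int i = 0; i < NUM_PINS; i++) {
		if (io->vector[i] != vector)
			continue;

		/* Drop the lock: the callback may take it recursively. */
		pthread_mutex_unlock(&io->lock);
		notify_acked(io, i);
		pthread_mutex_lock(&io->lock);

		/* Cleared only after the notifier returns, as in the patch. */
		io->remote_irr[i] = 0;
	}
	pthread_mutex_unlock(&io->lock);
}

int main(void)
{
	struct ioapic io = { .lock = PTHREAD_MUTEX_INITIALIZER };

	io.vector[3] = 0x51;
	io.remote_irr[3] = 1;
	update_eoi(&io, 0x51);
	printf("remote_irr[3] = %d\n", io.remote_irr[3]);
	return 0;
}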
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 7080b713c16..419c43b667a 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -41,9 +41,11 @@ struct kvm_ioapic {
41 | u32 irr; | 41 | u32 irr; |
42 | u32 pad; | 42 | u32 pad; |
43 | union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS]; | 43 | union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS]; |
44 | unsigned long irq_states[IOAPIC_NUM_PINS]; | ||
44 | struct kvm_io_device dev; | 45 | struct kvm_io_device dev; |
45 | struct kvm *kvm; | 46 | struct kvm *kvm; |
46 | void (*ack_notifier)(void *opaque, int irq); | 47 | void (*ack_notifier)(void *opaque, int irq); |
48 | struct mutex lock; | ||
47 | }; | 49 | }; |
48 | 50 | ||
49 | #ifdef DEBUG | 51 | #ifdef DEBUG |
@@ -73,4 +75,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); | |||
73 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic); | 75 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic); |
74 | int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | 76 | int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, |
75 | struct kvm_lapic_irq *irq); | 77 | struct kvm_lapic_irq *irq); |
78 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); | ||
79 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); | ||
80 | |||
76 | #endif | 81 | #endif |
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 001663ff401..9b077342ab5 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -31,20 +31,39 @@
31 | 31 | ||
32 | #include "ioapic.h" | 32 | #include "ioapic.h" |
33 | 33 | ||
34 | static inline int kvm_irq_line_state(unsigned long *irq_state, | ||
35 | int irq_source_id, int level) | ||
36 | { | ||
37 | /* Logical OR for level trig interrupt */ | ||
38 | if (level) | ||
39 | set_bit(irq_source_id, irq_state); | ||
40 | else | ||
41 | clear_bit(irq_source_id, irq_state); | ||
42 | |||
43 | return !!(*irq_state); | ||
44 | } | ||
45 | |||
34 | static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, | 46 | static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, |
35 | struct kvm *kvm, int level) | 47 | struct kvm *kvm, int irq_source_id, int level) |
36 | { | 48 | { |
37 | #ifdef CONFIG_X86 | 49 | #ifdef CONFIG_X86 |
38 | return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin, level); | 50 | struct kvm_pic *pic = pic_irqchip(kvm); |
51 | level = kvm_irq_line_state(&pic->irq_states[e->irqchip.pin], | ||
52 | irq_source_id, level); | ||
53 | return kvm_pic_set_irq(pic, e->irqchip.pin, level); | ||
39 | #else | 54 | #else |
40 | return -1; | 55 | return -1; |
41 | #endif | 56 | #endif |
42 | } | 57 | } |
43 | 58 | ||
44 | static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, | 59 | static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, |
45 | struct kvm *kvm, int level) | 60 | struct kvm *kvm, int irq_source_id, int level) |
46 | { | 61 | { |
47 | return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level); | 62 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
63 | level = kvm_irq_line_state(&ioapic->irq_states[e->irqchip.pin], | ||
64 | irq_source_id, level); | ||
65 | |||
66 | return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, level); | ||
48 | } | 67 | } |
49 | 68 | ||
50 | inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq) | 69 | inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq) |
@@ -63,8 +82,6 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | |||
63 | int i, r = -1; | 82 | int i, r = -1; |
64 | struct kvm_vcpu *vcpu, *lowest = NULL; | 83 | struct kvm_vcpu *vcpu, *lowest = NULL; |
65 | 84 | ||
66 | WARN_ON(!mutex_is_locked(&kvm->irq_lock)); | ||
67 | |||
68 | if (irq->dest_mode == 0 && irq->dest_id == 0xff && | 85 | if (irq->dest_mode == 0 && irq->dest_id == 0xff && |
69 | kvm_is_dm_lowest_prio(irq)) | 86 | kvm_is_dm_lowest_prio(irq)) |
70 | printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n"); | 87 | printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n"); |
@@ -96,10 +113,13 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | |||
96 | } | 113 | } |
97 | 114 | ||
98 | static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, | 115 | static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, |
99 | struct kvm *kvm, int level) | 116 | struct kvm *kvm, int irq_source_id, int level) |
100 | { | 117 | { |
101 | struct kvm_lapic_irq irq; | 118 | struct kvm_lapic_irq irq; |
102 | 119 | ||
120 | if (!level) | ||
121 | return -1; | ||
122 | |||
103 | trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data); | 123 | trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data); |
104 | 124 | ||
105 | irq.dest_id = (e->msi.address_lo & | 125 | irq.dest_id = (e->msi.address_lo & |
@@ -116,78 +136,67 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, | |||
116 | return kvm_irq_delivery_to_apic(kvm, NULL, &irq); | 136 | return kvm_irq_delivery_to_apic(kvm, NULL, &irq); |
117 | } | 137 | } |
118 | 138 | ||
119 | /* This should be called with the kvm->irq_lock mutex held | 139 | /* |
120 | * Return value: | 140 | * Return value: |
121 | * < 0 Interrupt was ignored (masked or not delivered for other reasons) | 141 | * < 0 Interrupt was ignored (masked or not delivered for other reasons) |
122 | * = 0 Interrupt was coalesced (previous irq is still pending) | 142 | * = 0 Interrupt was coalesced (previous irq is still pending) |
123 | * > 0 Number of CPUs interrupt was delivered to | 143 | * > 0 Number of CPUs interrupt was delivered to |
124 | */ | 144 | */ |
125 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level) | 145 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level) |
126 | { | 146 | { |
127 | struct kvm_kernel_irq_routing_entry *e; | 147 | struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS]; |
128 | unsigned long *irq_state, sig_level; | 148 | int ret = -1, i = 0; |
129 | int ret = -1; | 149 | struct kvm_irq_routing_table *irq_rt; |
150 | struct hlist_node *n; | ||
130 | 151 | ||
131 | trace_kvm_set_irq(irq, level, irq_source_id); | 152 | trace_kvm_set_irq(irq, level, irq_source_id); |
132 | 153 | ||
133 | WARN_ON(!mutex_is_locked(&kvm->irq_lock)); | ||
134 | |||
135 | if (irq < KVM_IOAPIC_NUM_PINS) { | ||
136 | irq_state = (unsigned long *)&kvm->arch.irq_states[irq]; | ||
137 | |||
138 | /* Logical OR for level trig interrupt */ | ||
139 | if (level) | ||
140 | set_bit(irq_source_id, irq_state); | ||
141 | else | ||
142 | clear_bit(irq_source_id, irq_state); | ||
143 | sig_level = !!(*irq_state); | ||
144 | } else if (!level) | ||
145 | return ret; | ||
146 | else /* Deal with MSI/MSI-X */ | ||
147 | sig_level = 1; | ||
148 | |||
149 | /* Not possible to detect if the guest uses the PIC or the | 154 | /* Not possible to detect if the guest uses the PIC or the |
150 | * IOAPIC. So set the bit in both. The guest will ignore | 155 | * IOAPIC. So set the bit in both. The guest will ignore |
151 | * writes to the unused one. | 156 | * writes to the unused one. |
152 | */ | 157 | */ |
153 | list_for_each_entry(e, &kvm->irq_routing, link) | 158 | rcu_read_lock(); |
154 | if (e->gsi == irq) { | 159 | irq_rt = rcu_dereference(kvm->irq_routing); |
155 | int r = e->set(e, kvm, sig_level); | 160 | if (irq < irq_rt->nr_rt_entries) |
156 | if (r < 0) | 161 | hlist_for_each_entry(e, n, &irq_rt->map[irq], link) |
157 | continue; | 162 | irq_set[i++] = *e; |
163 | rcu_read_unlock(); | ||
164 | |||
165 | while(i--) { | ||
166 | int r; | ||
167 | r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level); | ||
168 | if (r < 0) | ||
169 | continue; | ||
170 | |||
171 | ret = r + ((ret < 0) ? 0 : ret); | ||
172 | } | ||
158 | 173 | ||
159 | ret = r + ((ret < 0) ? 0 : ret); | ||
160 | } | ||
161 | return ret; | 174 | return ret; |
162 | } | 175 | } |
163 | 176 | ||
164 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) | 177 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) |
165 | { | 178 | { |
166 | struct kvm_kernel_irq_routing_entry *e; | ||
167 | struct kvm_irq_ack_notifier *kian; | 179 | struct kvm_irq_ack_notifier *kian; |
168 | struct hlist_node *n; | 180 | struct hlist_node *n; |
169 | unsigned gsi = pin; | 181 | int gsi; |
170 | 182 | ||
171 | trace_kvm_ack_irq(irqchip, pin); | 183 | trace_kvm_ack_irq(irqchip, pin); |
172 | 184 | ||
173 | list_for_each_entry(e, &kvm->irq_routing, link) | 185 | rcu_read_lock(); |
174 | if (e->type == KVM_IRQ_ROUTING_IRQCHIP && | 186 | gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; |
175 | e->irqchip.irqchip == irqchip && | 187 | if (gsi != -1) |
176 | e->irqchip.pin == pin) { | 188 | hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, |
177 | gsi = e->gsi; | 189 | link) |
178 | break; | 190 | if (kian->gsi == gsi) |
179 | } | 191 | kian->irq_acked(kian); |
180 | 192 | rcu_read_unlock(); | |
181 | hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link) | ||
182 | if (kian->gsi == gsi) | ||
183 | kian->irq_acked(kian); | ||
184 | } | 193 | } |
185 | 194 | ||
186 | void kvm_register_irq_ack_notifier(struct kvm *kvm, | 195 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
187 | struct kvm_irq_ack_notifier *kian) | 196 | struct kvm_irq_ack_notifier *kian) |
188 | { | 197 | { |
189 | mutex_lock(&kvm->irq_lock); | 198 | mutex_lock(&kvm->irq_lock); |
190 | hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list); | 199 | hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); |
191 | mutex_unlock(&kvm->irq_lock); | 200 | mutex_unlock(&kvm->irq_lock); |
192 | } | 201 | } |
193 | 202 | ||
@@ -195,8 +204,9 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm, | |||
195 | struct kvm_irq_ack_notifier *kian) | 204 | struct kvm_irq_ack_notifier *kian) |
196 | { | 205 | { |
197 | mutex_lock(&kvm->irq_lock); | 206 | mutex_lock(&kvm->irq_lock); |
198 | hlist_del_init(&kian->link); | 207 | hlist_del_init_rcu(&kian->link); |
199 | mutex_unlock(&kvm->irq_lock); | 208 | mutex_unlock(&kvm->irq_lock); |
209 | synchronize_rcu(); | ||
200 | } | 210 | } |
201 | 211 | ||
202 | int kvm_request_irq_source_id(struct kvm *kvm) | 212 | int kvm_request_irq_source_id(struct kvm *kvm) |
@@ -205,16 +215,17 @@ int kvm_request_irq_source_id(struct kvm *kvm) | |||
205 | int irq_source_id; | 215 | int irq_source_id; |
206 | 216 | ||
207 | mutex_lock(&kvm->irq_lock); | 217 | mutex_lock(&kvm->irq_lock); |
208 | irq_source_id = find_first_zero_bit(bitmap, | 218 | irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG); |
209 | sizeof(kvm->arch.irq_sources_bitmap)); | ||
210 | 219 | ||
211 | if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) { | 220 | if (irq_source_id >= BITS_PER_LONG) { |
212 | printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n"); | 221 | printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n"); |
213 | return -EFAULT; | 222 | irq_source_id = -EFAULT; |
223 | goto unlock; | ||
214 | } | 224 | } |
215 | 225 | ||
216 | ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); | 226 | ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); |
217 | set_bit(irq_source_id, bitmap); | 227 | set_bit(irq_source_id, bitmap); |
228 | unlock: | ||
218 | mutex_unlock(&kvm->irq_lock); | 229 | mutex_unlock(&kvm->irq_lock); |
219 | 230 | ||
220 | return irq_source_id; | 231 | return irq_source_id; |
@@ -228,13 +239,23 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) | |||
228 | 239 | ||
229 | mutex_lock(&kvm->irq_lock); | 240 | mutex_lock(&kvm->irq_lock); |
230 | if (irq_source_id < 0 || | 241 | if (irq_source_id < 0 || |
231 | irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) { | 242 | irq_source_id >= BITS_PER_LONG) { |
232 | printk(KERN_ERR "kvm: IRQ source ID out of range!\n"); | 243 | printk(KERN_ERR "kvm: IRQ source ID out of range!\n"); |
233 | return; | 244 | goto unlock; |
234 | } | 245 | } |
235 | for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) | ||
236 | clear_bit(irq_source_id, &kvm->arch.irq_states[i]); | ||
237 | clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); | 246 | clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); |
247 | if (!irqchip_in_kernel(kvm)) | ||
248 | goto unlock; | ||
249 | |||
250 | for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) { | ||
251 | clear_bit(irq_source_id, &kvm->arch.vioapic->irq_states[i]); | ||
252 | if (i >= 16) | ||
253 | continue; | ||
254 | #ifdef CONFIG_X86 | ||
255 | clear_bit(irq_source_id, &pic_irqchip(kvm)->irq_states[i]); | ||
256 | #endif | ||
257 | } | ||
258 | unlock: | ||
238 | mutex_unlock(&kvm->irq_lock); | 259 | mutex_unlock(&kvm->irq_lock); |
239 | } | 260 | } |
240 | 261 | ||
@@ -243,7 +264,7 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, | |||
243 | { | 264 | { |
244 | mutex_lock(&kvm->irq_lock); | 265 | mutex_lock(&kvm->irq_lock); |
245 | kimn->irq = irq; | 266 | kimn->irq = irq; |
246 | hlist_add_head(&kimn->link, &kvm->mask_notifier_list); | 267 | hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list); |
247 | mutex_unlock(&kvm->irq_lock); | 268 | mutex_unlock(&kvm->irq_lock); |
248 | } | 269 | } |
249 | 270 | ||
@@ -251,8 +272,9 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, | |||
251 | struct kvm_irq_mask_notifier *kimn) | 272 | struct kvm_irq_mask_notifier *kimn) |
252 | { | 273 | { |
253 | mutex_lock(&kvm->irq_lock); | 274 | mutex_lock(&kvm->irq_lock); |
254 | hlist_del(&kimn->link); | 275 | hlist_del_rcu(&kimn->link); |
255 | mutex_unlock(&kvm->irq_lock); | 276 | mutex_unlock(&kvm->irq_lock); |
277 | synchronize_rcu(); | ||
256 | } | 278 | } |
257 | 279 | ||
258 | void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) | 280 | void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) |
@@ -260,33 +282,37 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) | |||
260 | struct kvm_irq_mask_notifier *kimn; | 282 | struct kvm_irq_mask_notifier *kimn; |
261 | struct hlist_node *n; | 283 | struct hlist_node *n; |
262 | 284 | ||
263 | WARN_ON(!mutex_is_locked(&kvm->irq_lock)); | 285 | rcu_read_lock(); |
264 | 286 | hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link) | |
265 | hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link) | ||
266 | if (kimn->irq == irq) | 287 | if (kimn->irq == irq) |
267 | kimn->func(kimn, mask); | 288 | kimn->func(kimn, mask); |
268 | } | 289 | rcu_read_unlock(); |
269 | |||
270 | static void __kvm_free_irq_routing(struct list_head *irq_routing) | ||
271 | { | ||
272 | struct kvm_kernel_irq_routing_entry *e, *n; | ||
273 | |||
274 | list_for_each_entry_safe(e, n, irq_routing, link) | ||
275 | kfree(e); | ||
276 | } | 290 | } |
277 | 291 | ||
278 | void kvm_free_irq_routing(struct kvm *kvm) | 292 | void kvm_free_irq_routing(struct kvm *kvm) |
279 | { | 293 | { |
280 | mutex_lock(&kvm->irq_lock); | 294 | /* Called only during vm destruction. Nobody can use the pointer |
281 | __kvm_free_irq_routing(&kvm->irq_routing); | 295 | at this stage */ |
282 | mutex_unlock(&kvm->irq_lock); | 296 | kfree(kvm->irq_routing); |
283 | } | 297 | } |
284 | 298 | ||
285 | static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e, | 299 | static int setup_routing_entry(struct kvm_irq_routing_table *rt, |
300 | struct kvm_kernel_irq_routing_entry *e, | ||
286 | const struct kvm_irq_routing_entry *ue) | 301 | const struct kvm_irq_routing_entry *ue) |
287 | { | 302 | { |
288 | int r = -EINVAL; | 303 | int r = -EINVAL; |
289 | int delta; | 304 | int delta; |
305 | struct kvm_kernel_irq_routing_entry *ei; | ||
306 | struct hlist_node *n; | ||
307 | |||
308 | /* | ||
309 | * Do not allow GSI to be mapped to the same irqchip more than once. | ||
310 | * Allow only one to one mapping between GSI and MSI. | ||
311 | */ | ||
312 | hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) | ||
313 | if (ei->type == KVM_IRQ_ROUTING_MSI || | ||
314 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) | ||
315 | return r; | ||
290 | 316 | ||
291 | e->gsi = ue->gsi; | 317 | e->gsi = ue->gsi; |
292 | e->type = ue->type; | 318 | e->type = ue->type; |
@@ -309,6 +335,9 @@ static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e, | |||
309 | } | 335 | } |
310 | e->irqchip.irqchip = ue->u.irqchip.irqchip; | 336 | e->irqchip.irqchip = ue->u.irqchip.irqchip; |
311 | e->irqchip.pin = ue->u.irqchip.pin + delta; | 337 | e->irqchip.pin = ue->u.irqchip.pin + delta; |
338 | if (e->irqchip.pin >= KVM_IOAPIC_NUM_PINS) | ||
339 | goto out; | ||
340 | rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi; | ||
312 | break; | 341 | break; |
313 | case KVM_IRQ_ROUTING_MSI: | 342 | case KVM_IRQ_ROUTING_MSI: |
314 | e->set = kvm_set_msi; | 343 | e->set = kvm_set_msi; |
@@ -319,6 +348,8 @@ static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e, | |||
319 | default: | 348 | default: |
320 | goto out; | 349 | goto out; |
321 | } | 350 | } |
351 | |||
352 | hlist_add_head(&e->link, &rt->map[e->gsi]); | ||
322 | r = 0; | 353 | r = 0; |
323 | out: | 354 | out: |
324 | return r; | 355 | return r; |
@@ -330,43 +361,53 @@ int kvm_set_irq_routing(struct kvm *kvm, | |||
330 | unsigned nr, | 361 | unsigned nr, |
331 | unsigned flags) | 362 | unsigned flags) |
332 | { | 363 | { |
333 | struct list_head irq_list = LIST_HEAD_INIT(irq_list); | 364 | struct kvm_irq_routing_table *new, *old; |
334 | struct list_head tmp = LIST_HEAD_INIT(tmp); | 365 | u32 i, j, nr_rt_entries = 0; |
335 | struct kvm_kernel_irq_routing_entry *e = NULL; | ||
336 | unsigned i; | ||
337 | int r; | 366 | int r; |
338 | 367 | ||
339 | for (i = 0; i < nr; ++i) { | 368 | for (i = 0; i < nr; ++i) { |
369 | if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES) | ||
370 | return -EINVAL; | ||
371 | nr_rt_entries = max(nr_rt_entries, ue[i].gsi); | ||
372 | } | ||
373 | |||
374 | nr_rt_entries += 1; | ||
375 | |||
376 | new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head)) | ||
377 | + (nr * sizeof(struct kvm_kernel_irq_routing_entry)), | ||
378 | GFP_KERNEL); | ||
379 | |||
380 | if (!new) | ||
381 | return -ENOMEM; | ||
382 | |||
383 | new->rt_entries = (void *)&new->map[nr_rt_entries]; | ||
384 | |||
385 | new->nr_rt_entries = nr_rt_entries; | ||
386 | for (i = 0; i < 3; i++) | ||
387 | for (j = 0; j < KVM_IOAPIC_NUM_PINS; j++) | ||
388 | new->chip[i][j] = -1; | ||
389 | |||
390 | for (i = 0; i < nr; ++i) { | ||
340 | r = -EINVAL; | 391 | r = -EINVAL; |
341 | if (ue->gsi >= KVM_MAX_IRQ_ROUTES) | ||
342 | goto out; | ||
343 | if (ue->flags) | 392 | if (ue->flags) |
344 | goto out; | 393 | goto out; |
345 | r = -ENOMEM; | 394 | r = setup_routing_entry(new, &new->rt_entries[i], ue); |
346 | e = kzalloc(sizeof(*e), GFP_KERNEL); | ||
347 | if (!e) | ||
348 | goto out; | ||
349 | r = setup_routing_entry(e, ue); | ||
350 | if (r) | 395 | if (r) |
351 | goto out; | 396 | goto out; |
352 | ++ue; | 397 | ++ue; |
353 | list_add(&e->link, &irq_list); | ||
354 | e = NULL; | ||
355 | } | 398 | } |
356 | 399 | ||
357 | mutex_lock(&kvm->irq_lock); | 400 | mutex_lock(&kvm->irq_lock); |
358 | list_splice(&kvm->irq_routing, &tmp); | 401 | old = kvm->irq_routing; |
359 | INIT_LIST_HEAD(&kvm->irq_routing); | 402 | rcu_assign_pointer(kvm->irq_routing, new); |
360 | list_splice(&irq_list, &kvm->irq_routing); | ||
361 | INIT_LIST_HEAD(&irq_list); | ||
362 | list_splice(&tmp, &irq_list); | ||
363 | mutex_unlock(&kvm->irq_lock); | 403 | mutex_unlock(&kvm->irq_lock); |
404 | synchronize_rcu(); | ||
364 | 405 | ||
406 | new = old; | ||
365 | r = 0; | 407 | r = 0; |
366 | 408 | ||
367 | out: | 409 | out: |
368 | kfree(e); | 410 | kfree(new); |
369 | __kvm_free_irq_routing(&irq_list); | ||
370 | return r; | 411 | return r; |
371 | } | 412 | } |
372 | 413 | ||
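
The routing update now follows the classic RCU publish pattern: build the new table off to the side, swap it in with rcu_assign_pointer() under irq_lock, wait out existing readers with synchronize_rcu(), then free the old table (note the "new = old" swap before the final kfree). Readers can therefore walk map[gsi] without taking any lock. A rough sketch of such a read side (function name hypothetical; a real consumer would invoke each entry's ->set callback rather than just counting):

/* Sketch of a lockless reader paired with the publish/synchronize_rcu()
 * sequence above. */
static int count_routes_sketch(struct kvm *kvm, u32 gsi)
{
	struct kvm_irq_routing_table *rt;
	struct hlist_node *n;
	int count = 0;

	rcu_read_lock();
	rt = rcu_dereference(kvm->irq_routing);
	if (gsi < rt->nr_rt_entries)
		for (n = rcu_dereference(rt->map[gsi].first); n;
		     n = rcu_dereference(n->next))
			count++;	/* each node embeds a kvm_kernel_irq_routing_entry via ->link */
	rcu_read_unlock();

	return count;
}
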
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 7495ce34734..f92ba138007 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/swap.h> | 43 | #include <linux/swap.h> |
44 | #include <linux/bitops.h> | 44 | #include <linux/bitops.h> |
45 | #include <linux/spinlock.h> | 45 | #include <linux/spinlock.h> |
46 | #include <linux/compat.h> | ||
46 | 47 | ||
47 | #include <asm/processor.h> | 48 | #include <asm/processor.h> |
48 | #include <asm/io.h> | 49 | #include <asm/io.h> |
@@ -53,12 +54,6 @@ | |||
53 | #include "coalesced_mmio.h" | 54 | #include "coalesced_mmio.h" |
54 | #endif | 55 | #endif |
55 | 56 | ||
56 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | ||
57 | #include <linux/pci.h> | ||
58 | #include <linux/interrupt.h> | ||
59 | #include "irq.h" | ||
60 | #endif | ||
61 | |||
62 | #define CREATE_TRACE_POINTS | 57 | #define CREATE_TRACE_POINTS |
63 | #include <trace/events/kvm.h> | 58 | #include <trace/events/kvm.h> |
64 | 59 | ||
@@ -75,6 +70,8 @@ DEFINE_SPINLOCK(kvm_lock); | |||
75 | LIST_HEAD(vm_list); | 70 | LIST_HEAD(vm_list); |
76 | 71 | ||
77 | static cpumask_var_t cpus_hardware_enabled; | 72 | static cpumask_var_t cpus_hardware_enabled; |
73 | static int kvm_usage_count = 0; | ||
74 | static atomic_t hardware_enable_failed; | ||
78 | 75 | ||
79 | struct kmem_cache *kvm_vcpu_cache; | 76 | struct kmem_cache *kvm_vcpu_cache; |
80 | EXPORT_SYMBOL_GPL(kvm_vcpu_cache); | 77 | EXPORT_SYMBOL_GPL(kvm_vcpu_cache); |
@@ -85,615 +82,13 @@ struct dentry *kvm_debugfs_dir; | |||
85 | 82 | ||
86 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, | 83 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, |
87 | unsigned long arg); | 84 | unsigned long arg); |
85 | static int hardware_enable_all(void); | ||
86 | static void hardware_disable_all(void); | ||
88 | 87 | ||
89 | static bool kvm_rebooting; | 88 | static bool kvm_rebooting; |
90 | 89 | ||
91 | static bool largepages_enabled = true; | 90 | static bool largepages_enabled = true; |
92 | 91 | ||
93 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | ||
94 | static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, | ||
95 | int assigned_dev_id) | ||
96 | { | ||
97 | struct list_head *ptr; | ||
98 | struct kvm_assigned_dev_kernel *match; | ||
99 | |||
100 | list_for_each(ptr, head) { | ||
101 | match = list_entry(ptr, struct kvm_assigned_dev_kernel, list); | ||
102 | if (match->assigned_dev_id == assigned_dev_id) | ||
103 | return match; | ||
104 | } | ||
105 | return NULL; | ||
106 | } | ||
107 | |||
108 | static int find_index_from_host_irq(struct kvm_assigned_dev_kernel | ||
109 | *assigned_dev, int irq) | ||
110 | { | ||
111 | int i, index; | ||
112 | struct msix_entry *host_msix_entries; | ||
113 | |||
114 | host_msix_entries = assigned_dev->host_msix_entries; | ||
115 | |||
116 | index = -1; | ||
117 | for (i = 0; i < assigned_dev->entries_nr; i++) | ||
118 | if (irq == host_msix_entries[i].vector) { | ||
119 | index = i; | ||
120 | break; | ||
121 | } | ||
122 | if (index < 0) { | ||
123 | printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n"); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | return index; | ||
128 | } | ||
129 | |||
130 | static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work) | ||
131 | { | ||
132 | struct kvm_assigned_dev_kernel *assigned_dev; | ||
133 | struct kvm *kvm; | ||
134 | int i; | ||
135 | |||
136 | assigned_dev = container_of(work, struct kvm_assigned_dev_kernel, | ||
137 | interrupt_work); | ||
138 | kvm = assigned_dev->kvm; | ||
139 | |||
140 | mutex_lock(&kvm->irq_lock); | ||
141 | spin_lock_irq(&assigned_dev->assigned_dev_lock); | ||
142 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | ||
143 | struct kvm_guest_msix_entry *guest_entries = | ||
144 | assigned_dev->guest_msix_entries; | ||
145 | for (i = 0; i < assigned_dev->entries_nr; i++) { | ||
146 | if (!(guest_entries[i].flags & | ||
147 | KVM_ASSIGNED_MSIX_PENDING)) | ||
148 | continue; | ||
149 | guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING; | ||
150 | kvm_set_irq(assigned_dev->kvm, | ||
151 | assigned_dev->irq_source_id, | ||
152 | guest_entries[i].vector, 1); | ||
153 | } | ||
154 | } else | ||
155 | kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, | ||
156 | assigned_dev->guest_irq, 1); | ||
157 | |||
158 | spin_unlock_irq(&assigned_dev->assigned_dev_lock); | ||
159 | mutex_unlock(&assigned_dev->kvm->irq_lock); | ||
160 | } | ||
161 | |||
162 | static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) | ||
163 | { | ||
164 | unsigned long flags; | ||
165 | struct kvm_assigned_dev_kernel *assigned_dev = | ||
166 | (struct kvm_assigned_dev_kernel *) dev_id; | ||
167 | |||
168 | spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags); | ||
169 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | ||
170 | int index = find_index_from_host_irq(assigned_dev, irq); | ||
171 | if (index < 0) | ||
172 | goto out; | ||
173 | assigned_dev->guest_msix_entries[index].flags |= | ||
174 | KVM_ASSIGNED_MSIX_PENDING; | ||
175 | } | ||
176 | |||
177 | schedule_work(&assigned_dev->interrupt_work); | ||
178 | |||
179 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) { | ||
180 | disable_irq_nosync(irq); | ||
181 | assigned_dev->host_irq_disabled = true; | ||
182 | } | ||
183 | |||
184 | out: | ||
185 | spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags); | ||
186 | return IRQ_HANDLED; | ||
187 | } | ||
188 | |||
189 | /* Ack the irq line for an assigned device */ | ||
190 | static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) | ||
191 | { | ||
192 | struct kvm_assigned_dev_kernel *dev; | ||
193 | unsigned long flags; | ||
194 | |||
195 | if (kian->gsi == -1) | ||
196 | return; | ||
197 | |||
198 | dev = container_of(kian, struct kvm_assigned_dev_kernel, | ||
199 | ack_notifier); | ||
200 | |||
201 | kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0); | ||
202 | |||
203 | /* The guest irq may be shared so this ack may be | ||
204 | * from another device. | ||
205 | */ | ||
206 | spin_lock_irqsave(&dev->assigned_dev_lock, flags); | ||
207 | if (dev->host_irq_disabled) { | ||
208 | enable_irq(dev->host_irq); | ||
209 | dev->host_irq_disabled = false; | ||
210 | } | ||
211 | spin_unlock_irqrestore(&dev->assigned_dev_lock, flags); | ||
212 | } | ||
213 | |||
214 | static void deassign_guest_irq(struct kvm *kvm, | ||
215 | struct kvm_assigned_dev_kernel *assigned_dev) | ||
216 | { | ||
217 | kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier); | ||
218 | assigned_dev->ack_notifier.gsi = -1; | ||
219 | |||
220 | if (assigned_dev->irq_source_id != -1) | ||
221 | kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); | ||
222 | assigned_dev->irq_source_id = -1; | ||
223 | assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK); | ||
224 | } | ||
225 | |||
226 | /* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */ | ||
227 | static void deassign_host_irq(struct kvm *kvm, | ||
228 | struct kvm_assigned_dev_kernel *assigned_dev) | ||
229 | { | ||
230 | /* | ||
231 | * In kvm_free_device_irq, cancel_work_sync() returns true if: | ||
232 | * 1. the work was scheduled and has been cancelled, or | ||
233 | * 2. the work callback has already been executed. | ||
234 | * | ||
235 | * The first case ensures that the irq is disabled and no more events | ||
236 | * will arrive. In the second case the irq may still be enabled (e.g. | ||
237 | * for MSI), so we disable it here to prevent further events. | ||
238 | * | ||
239 | * Note this may result in a nested disable if the interrupt type is | ||
240 | * INTx, but that is fine since we are about to free it. | ||
241 | * | ||
242 | * If this function is called as part of VM destruction, make sure that | ||
243 | * the kvm state is still valid at this point, since we may also have to | ||
244 | * wait for interrupt_work to complete. | ||
245 | */ | ||
246 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | ||
247 | int i; | ||
248 | for (i = 0; i < assigned_dev->entries_nr; i++) | ||
249 | disable_irq_nosync(assigned_dev-> | ||
250 | host_msix_entries[i].vector); | ||
251 | |||
252 | cancel_work_sync(&assigned_dev->interrupt_work); | ||
253 | |||
254 | for (i = 0; i < assigned_dev->entries_nr; i++) | ||
255 | free_irq(assigned_dev->host_msix_entries[i].vector, | ||
256 | (void *)assigned_dev); | ||
257 | |||
258 | assigned_dev->entries_nr = 0; | ||
259 | kfree(assigned_dev->host_msix_entries); | ||
260 | kfree(assigned_dev->guest_msix_entries); | ||
261 | pci_disable_msix(assigned_dev->dev); | ||
262 | } else { | ||
263 | /* Deal with MSI and INTx */ | ||
264 | disable_irq_nosync(assigned_dev->host_irq); | ||
265 | cancel_work_sync(&assigned_dev->interrupt_work); | ||
266 | |||
267 | free_irq(assigned_dev->host_irq, (void *)assigned_dev); | ||
268 | |||
269 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI) | ||
270 | pci_disable_msi(assigned_dev->dev); | ||
271 | } | ||
272 | |||
273 | assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK); | ||
274 | } | ||
275 | |||
276 | static int kvm_deassign_irq(struct kvm *kvm, | ||
277 | struct kvm_assigned_dev_kernel *assigned_dev, | ||
278 | unsigned long irq_requested_type) | ||
279 | { | ||
280 | unsigned long guest_irq_type, host_irq_type; | ||
281 | |||
282 | if (!irqchip_in_kernel(kvm)) | ||
283 | return -EINVAL; | ||
284 | /* no irq assignment to deassign */ | ||
285 | if (!assigned_dev->irq_requested_type) | ||
286 | return -ENXIO; | ||
287 | |||
288 | host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK; | ||
289 | guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK; | ||
290 | |||
291 | if (host_irq_type) | ||
292 | deassign_host_irq(kvm, assigned_dev); | ||
293 | if (guest_irq_type) | ||
294 | deassign_guest_irq(kvm, assigned_dev); | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | static void kvm_free_assigned_irq(struct kvm *kvm, | ||
300 | struct kvm_assigned_dev_kernel *assigned_dev) | ||
301 | { | ||
302 | kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type); | ||
303 | } | ||
304 | |||
305 | static void kvm_free_assigned_device(struct kvm *kvm, | ||
306 | struct kvm_assigned_dev_kernel | ||
307 | *assigned_dev) | ||
308 | { | ||
309 | kvm_free_assigned_irq(kvm, assigned_dev); | ||
310 | |||
311 | pci_reset_function(assigned_dev->dev); | ||
312 | |||
313 | pci_release_regions(assigned_dev->dev); | ||
314 | pci_disable_device(assigned_dev->dev); | ||
315 | pci_dev_put(assigned_dev->dev); | ||
316 | |||
317 | list_del(&assigned_dev->list); | ||
318 | kfree(assigned_dev); | ||
319 | } | ||
320 | |||
321 | void kvm_free_all_assigned_devices(struct kvm *kvm) | ||
322 | { | ||
323 | struct list_head *ptr, *ptr2; | ||
324 | struct kvm_assigned_dev_kernel *assigned_dev; | ||
325 | |||
326 | list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) { | ||
327 | assigned_dev = list_entry(ptr, | ||
328 | struct kvm_assigned_dev_kernel, | ||
329 | list); | ||
330 | |||
331 | kvm_free_assigned_device(kvm, assigned_dev); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | static int assigned_device_enable_host_intx(struct kvm *kvm, | ||
336 | struct kvm_assigned_dev_kernel *dev) | ||
337 | { | ||
338 | dev->host_irq = dev->dev->irq; | ||
339 | /* Even though this is PCI, we don't want to use shared | ||
340 | * interrupts. Sharing host devices with guest-assigned devices | ||
341 | * on the same interrupt line is not a happy situation: there | ||
342 | * are going to be long delays in accepting, acking, etc. | ||
343 | */ | ||
344 | if (request_irq(dev->host_irq, kvm_assigned_dev_intr, | ||
345 | 0, "kvm_assigned_intx_device", (void *)dev)) | ||
346 | return -EIO; | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | #ifdef __KVM_HAVE_MSI | ||
351 | static int assigned_device_enable_host_msi(struct kvm *kvm, | ||
352 | struct kvm_assigned_dev_kernel *dev) | ||
353 | { | ||
354 | int r; | ||
355 | |||
356 | if (!dev->dev->msi_enabled) { | ||
357 | r = pci_enable_msi(dev->dev); | ||
358 | if (r) | ||
359 | return r; | ||
360 | } | ||
361 | |||
362 | dev->host_irq = dev->dev->irq; | ||
363 | if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0, | ||
364 | "kvm_assigned_msi_device", (void *)dev)) { | ||
365 | pci_disable_msi(dev->dev); | ||
366 | return -EIO; | ||
367 | } | ||
368 | |||
369 | return 0; | ||
370 | } | ||
371 | #endif | ||
372 | |||
373 | #ifdef __KVM_HAVE_MSIX | ||
374 | static int assigned_device_enable_host_msix(struct kvm *kvm, | ||
375 | struct kvm_assigned_dev_kernel *dev) | ||
376 | { | ||
377 | int i, r = -EINVAL; | ||
378 | |||
379 | /* host_msix_entries and guest_msix_entries should have been | ||
380 | * initialized */ | ||
381 | if (dev->entries_nr == 0) | ||
382 | return r; | ||
383 | |||
384 | r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr); | ||
385 | if (r) | ||
386 | return r; | ||
387 | |||
388 | for (i = 0; i < dev->entries_nr; i++) { | ||
389 | r = request_irq(dev->host_msix_entries[i].vector, | ||
390 | kvm_assigned_dev_intr, 0, | ||
391 | "kvm_assigned_msix_device", | ||
392 | (void *)dev); | ||
393 | /* FIXME: free requested_irq's on failure */ | ||
394 | if (r) | ||
395 | return r; | ||
396 | } | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | #endif | ||
402 | |||
403 | static int assigned_device_enable_guest_intx(struct kvm *kvm, | ||
404 | struct kvm_assigned_dev_kernel *dev, | ||
405 | struct kvm_assigned_irq *irq) | ||
406 | { | ||
407 | dev->guest_irq = irq->guest_irq; | ||
408 | dev->ack_notifier.gsi = irq->guest_irq; | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | #ifdef __KVM_HAVE_MSI | ||
413 | static int assigned_device_enable_guest_msi(struct kvm *kvm, | ||
414 | struct kvm_assigned_dev_kernel *dev, | ||
415 | struct kvm_assigned_irq *irq) | ||
416 | { | ||
417 | dev->guest_irq = irq->guest_irq; | ||
418 | dev->ack_notifier.gsi = -1; | ||
419 | dev->host_irq_disabled = false; | ||
420 | return 0; | ||
421 | } | ||
422 | #endif | ||
423 | #ifdef __KVM_HAVE_MSIX | ||
424 | static int assigned_device_enable_guest_msix(struct kvm *kvm, | ||
425 | struct kvm_assigned_dev_kernel *dev, | ||
426 | struct kvm_assigned_irq *irq) | ||
427 | { | ||
428 | dev->guest_irq = irq->guest_irq; | ||
429 | dev->ack_notifier.gsi = -1; | ||
430 | dev->host_irq_disabled = false; | ||
431 | return 0; | ||
432 | } | ||
433 | #endif | ||
434 | |||
435 | static int assign_host_irq(struct kvm *kvm, | ||
436 | struct kvm_assigned_dev_kernel *dev, | ||
437 | __u32 host_irq_type) | ||
438 | { | ||
439 | int r = -EEXIST; | ||
440 | |||
441 | if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK) | ||
442 | return r; | ||
443 | |||
444 | switch (host_irq_type) { | ||
445 | case KVM_DEV_IRQ_HOST_INTX: | ||
446 | r = assigned_device_enable_host_intx(kvm, dev); | ||
447 | break; | ||
448 | #ifdef __KVM_HAVE_MSI | ||
449 | case KVM_DEV_IRQ_HOST_MSI: | ||
450 | r = assigned_device_enable_host_msi(kvm, dev); | ||
451 | break; | ||
452 | #endif | ||
453 | #ifdef __KVM_HAVE_MSIX | ||
454 | case KVM_DEV_IRQ_HOST_MSIX: | ||
455 | r = assigned_device_enable_host_msix(kvm, dev); | ||
456 | break; | ||
457 | #endif | ||
458 | default: | ||
459 | r = -EINVAL; | ||
460 | } | ||
461 | |||
462 | if (!r) | ||
463 | dev->irq_requested_type |= host_irq_type; | ||
464 | |||
465 | return r; | ||
466 | } | ||
467 | |||
468 | static int assign_guest_irq(struct kvm *kvm, | ||
469 | struct kvm_assigned_dev_kernel *dev, | ||
470 | struct kvm_assigned_irq *irq, | ||
471 | unsigned long guest_irq_type) | ||
472 | { | ||
473 | int id; | ||
474 | int r = -EEXIST; | ||
475 | |||
476 | if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK) | ||
477 | return r; | ||
478 | |||
479 | id = kvm_request_irq_source_id(kvm); | ||
480 | if (id < 0) | ||
481 | return id; | ||
482 | |||
483 | dev->irq_source_id = id; | ||
484 | |||
485 | switch (guest_irq_type) { | ||
486 | case KVM_DEV_IRQ_GUEST_INTX: | ||
487 | r = assigned_device_enable_guest_intx(kvm, dev, irq); | ||
488 | break; | ||
489 | #ifdef __KVM_HAVE_MSI | ||
490 | case KVM_DEV_IRQ_GUEST_MSI: | ||
491 | r = assigned_device_enable_guest_msi(kvm, dev, irq); | ||
492 | break; | ||
493 | #endif | ||
494 | #ifdef __KVM_HAVE_MSIX | ||
495 | case KVM_DEV_IRQ_GUEST_MSIX: | ||
496 | r = assigned_device_enable_guest_msix(kvm, dev, irq); | ||
497 | break; | ||
498 | #endif | ||
499 | default: | ||
500 | r = -EINVAL; | ||
501 | } | ||
502 | |||
503 | if (!r) { | ||
504 | dev->irq_requested_type |= guest_irq_type; | ||
505 | kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier); | ||
506 | } else | ||
507 | kvm_free_irq_source_id(kvm, dev->irq_source_id); | ||
508 | |||
509 | return r; | ||
510 | } | ||
511 | |||
512 | /* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */ | ||
513 | static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, | ||
514 | struct kvm_assigned_irq *assigned_irq) | ||
515 | { | ||
516 | int r = -EINVAL; | ||
517 | struct kvm_assigned_dev_kernel *match; | ||
518 | unsigned long host_irq_type, guest_irq_type; | ||
519 | |||
520 | if (!capable(CAP_SYS_RAWIO)) | ||
521 | return -EPERM; | ||
522 | |||
523 | if (!irqchip_in_kernel(kvm)) | ||
524 | return r; | ||
525 | |||
526 | mutex_lock(&kvm->lock); | ||
527 | r = -ENODEV; | ||
528 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
529 | assigned_irq->assigned_dev_id); | ||
530 | if (!match) | ||
531 | goto out; | ||
532 | |||
533 | host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK); | ||
534 | guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK); | ||
535 | |||
536 | r = -EINVAL; | ||
537 | /* can only assign one type at a time */ | ||
538 | if (hweight_long(host_irq_type) > 1) | ||
539 | goto out; | ||
540 | if (hweight_long(guest_irq_type) > 1) | ||
541 | goto out; | ||
542 | if (host_irq_type == 0 && guest_irq_type == 0) | ||
543 | goto out; | ||
544 | |||
545 | r = 0; | ||
546 | if (host_irq_type) | ||
547 | r = assign_host_irq(kvm, match, host_irq_type); | ||
548 | if (r) | ||
549 | goto out; | ||
550 | |||
551 | if (guest_irq_type) | ||
552 | r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type); | ||
553 | out: | ||
554 | mutex_unlock(&kvm->lock); | ||
555 | return r; | ||
556 | } | ||
557 | |||
558 | static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, | ||
559 | struct kvm_assigned_irq | ||
560 | *assigned_irq) | ||
561 | { | ||
562 | int r = -ENODEV; | ||
563 | struct kvm_assigned_dev_kernel *match; | ||
564 | |||
565 | mutex_lock(&kvm->lock); | ||
566 | |||
567 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
568 | assigned_irq->assigned_dev_id); | ||
569 | if (!match) | ||
570 | goto out; | ||
571 | |||
572 | r = kvm_deassign_irq(kvm, match, assigned_irq->flags); | ||
573 | out: | ||
574 | mutex_unlock(&kvm->lock); | ||
575 | return r; | ||
576 | } | ||
577 | |||
578 | static int kvm_vm_ioctl_assign_device(struct kvm *kvm, | ||
579 | struct kvm_assigned_pci_dev *assigned_dev) | ||
580 | { | ||
581 | int r = 0; | ||
582 | struct kvm_assigned_dev_kernel *match; | ||
583 | struct pci_dev *dev; | ||
584 | |||
585 | down_read(&kvm->slots_lock); | ||
586 | mutex_lock(&kvm->lock); | ||
587 | |||
588 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
589 | assigned_dev->assigned_dev_id); | ||
590 | if (match) { | ||
591 | /* device already assigned */ | ||
592 | r = -EEXIST; | ||
593 | goto out; | ||
594 | } | ||
595 | |||
596 | match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL); | ||
597 | if (match == NULL) { | ||
598 | printk(KERN_INFO "%s: Couldn't allocate memory\n", | ||
599 | __func__); | ||
600 | r = -ENOMEM; | ||
601 | goto out; | ||
602 | } | ||
603 | dev = pci_get_bus_and_slot(assigned_dev->busnr, | ||
604 | assigned_dev->devfn); | ||
605 | if (!dev) { | ||
606 | printk(KERN_INFO "%s: host device not found\n", __func__); | ||
607 | r = -EINVAL; | ||
608 | goto out_free; | ||
609 | } | ||
610 | if (pci_enable_device(dev)) { | ||
611 | printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); | ||
612 | r = -EBUSY; | ||
613 | goto out_put; | ||
614 | } | ||
615 | r = pci_request_regions(dev, "kvm_assigned_device"); | ||
616 | if (r) { | ||
617 | printk(KERN_INFO "%s: Could not get access to device regions\n", | ||
618 | __func__); | ||
619 | goto out_disable; | ||
620 | } | ||
621 | |||
622 | pci_reset_function(dev); | ||
623 | |||
624 | match->assigned_dev_id = assigned_dev->assigned_dev_id; | ||
625 | match->host_busnr = assigned_dev->busnr; | ||
626 | match->host_devfn = assigned_dev->devfn; | ||
627 | match->flags = assigned_dev->flags; | ||
628 | match->dev = dev; | ||
629 | spin_lock_init(&match->assigned_dev_lock); | ||
630 | match->irq_source_id = -1; | ||
631 | match->kvm = kvm; | ||
632 | match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq; | ||
633 | INIT_WORK(&match->interrupt_work, | ||
634 | kvm_assigned_dev_interrupt_work_handler); | ||
635 | |||
636 | list_add(&match->list, &kvm->arch.assigned_dev_head); | ||
637 | |||
638 | if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { | ||
639 | if (!kvm->arch.iommu_domain) { | ||
640 | r = kvm_iommu_map_guest(kvm); | ||
641 | if (r) | ||
642 | goto out_list_del; | ||
643 | } | ||
644 | r = kvm_assign_device(kvm, match); | ||
645 | if (r) | ||
646 | goto out_list_del; | ||
647 | } | ||
648 | |||
649 | out: | ||
650 | mutex_unlock(&kvm->lock); | ||
651 | up_read(&kvm->slots_lock); | ||
652 | return r; | ||
653 | out_list_del: | ||
654 | list_del(&match->list); | ||
655 | pci_release_regions(dev); | ||
656 | out_disable: | ||
657 | pci_disable_device(dev); | ||
658 | out_put: | ||
659 | pci_dev_put(dev); | ||
660 | out_free: | ||
661 | kfree(match); | ||
662 | mutex_unlock(&kvm->lock); | ||
663 | up_read(&kvm->slots_lock); | ||
664 | return r; | ||
665 | } | ||
666 | #endif | ||
667 | |||
668 | #ifdef KVM_CAP_DEVICE_DEASSIGNMENT | ||
669 | static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, | ||
670 | struct kvm_assigned_pci_dev *assigned_dev) | ||
671 | { | ||
672 | int r = 0; | ||
673 | struct kvm_assigned_dev_kernel *match; | ||
674 | |||
675 | mutex_lock(&kvm->lock); | ||
676 | |||
677 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
678 | assigned_dev->assigned_dev_id); | ||
679 | if (!match) { | ||
680 | printk(KERN_INFO "%s: device hasn't been assigned before, " | ||
681 | "so cannot be deassigned\n", __func__); | ||
682 | r = -EINVAL; | ||
683 | goto out; | ||
684 | } | ||
685 | |||
686 | if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) | ||
687 | kvm_deassign_device(kvm, match); | ||
688 | |||
689 | kvm_free_assigned_device(kvm, match); | ||
690 | |||
691 | out: | ||
692 | mutex_unlock(&kvm->lock); | ||
693 | return r; | ||
694 | } | ||
695 | #endif | ||
696 | |||
697 | inline int kvm_is_mmio_pfn(pfn_t pfn) | 92 | inline int kvm_is_mmio_pfn(pfn_t pfn) |
698 | { | 93 | { |
699 | if (pfn_valid(pfn)) { | 94 | if (pfn_valid(pfn)) { |
@@ -949,6 +344,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | |||
949 | 344 | ||
950 | static struct kvm *kvm_create_vm(void) | 345 | static struct kvm *kvm_create_vm(void) |
951 | { | 346 | { |
347 | int r = 0; | ||
952 | struct kvm *kvm = kvm_arch_create_vm(); | 348 | struct kvm *kvm = kvm_arch_create_vm(); |
953 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 349 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
954 | struct page *page; | 350 | struct page *page; |
@@ -956,16 +352,21 @@ static struct kvm *kvm_create_vm(void) | |||
956 | 352 | ||
957 | if (IS_ERR(kvm)) | 353 | if (IS_ERR(kvm)) |
958 | goto out; | 354 | goto out; |
355 | |||
356 | r = hardware_enable_all(); | ||
357 | if (r) | ||
358 | goto out_err_nodisable; | ||
359 | |||
959 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 360 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
960 | INIT_LIST_HEAD(&kvm->irq_routing); | ||
961 | INIT_HLIST_HEAD(&kvm->mask_notifier_list); | 361 | INIT_HLIST_HEAD(&kvm->mask_notifier_list); |
362 | INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); | ||
962 | #endif | 363 | #endif |
963 | 364 | ||
964 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 365 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
965 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | 366 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
966 | if (!page) { | 367 | if (!page) { |
967 | kfree(kvm); | 368 | r = -ENOMEM; |
968 | return ERR_PTR(-ENOMEM); | 369 | goto out_err; |
969 | } | 370 | } |
970 | kvm->coalesced_mmio_ring = | 371 | kvm->coalesced_mmio_ring = |
971 | (struct kvm_coalesced_mmio_ring *)page_address(page); | 372 | (struct kvm_coalesced_mmio_ring *)page_address(page); |
@@ -973,15 +374,13 @@ static struct kvm *kvm_create_vm(void) | |||
973 | 374 | ||
974 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) | 375 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
975 | { | 376 | { |
976 | int err; | ||
977 | kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; | 377 | kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; |
978 | err = mmu_notifier_register(&kvm->mmu_notifier, current->mm); | 378 | r = mmu_notifier_register(&kvm->mmu_notifier, current->mm); |
979 | if (err) { | 379 | if (r) { |
980 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 380 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
981 | put_page(page); | 381 | put_page(page); |
982 | #endif | 382 | #endif |
983 | kfree(kvm); | 383 | goto out_err; |
984 | return ERR_PTR(err); | ||
985 | } | 384 | } |
986 | } | 385 | } |
987 | #endif | 386 | #endif |
@@ -1005,6 +404,12 @@ static struct kvm *kvm_create_vm(void) | |||
1005 | #endif | 404 | #endif |
1006 | out: | 405 | out: |
1007 | return kvm; | 406 | return kvm; |
407 | |||
408 | out_err: | ||
409 | hardware_disable_all(); | ||
410 | out_err_nodisable: | ||
411 | kfree(kvm); | ||
412 | return ERR_PTR(r); | ||
1008 | } | 413 | } |
1009 | 414 | ||
1010 | /* | 415 | /* |
@@ -1063,6 +468,7 @@ static void kvm_destroy_vm(struct kvm *kvm) | |||
1063 | kvm_arch_flush_shadow(kvm); | 468 | kvm_arch_flush_shadow(kvm); |
1064 | #endif | 469 | #endif |
1065 | kvm_arch_destroy_vm(kvm); | 470 | kvm_arch_destroy_vm(kvm); |
471 | hardware_disable_all(); | ||
1066 | mmdrop(mm); | 472 | mmdrop(mm); |
1067 | } | 473 | } |
1068 | 474 | ||
@@ -1689,9 +1095,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) | |||
1689 | if (signal_pending(current)) | 1095 | if (signal_pending(current)) |
1690 | break; | 1096 | break; |
1691 | 1097 | ||
1692 | vcpu_put(vcpu); | ||
1693 | schedule(); | 1098 | schedule(); |
1694 | vcpu_load(vcpu); | ||
1695 | } | 1099 | } |
1696 | 1100 | ||
1697 | finish_wait(&vcpu->wq, &wait); | 1101 | finish_wait(&vcpu->wq, &wait); |
@@ -1705,6 +1109,21 @@ void kvm_resched(struct kvm_vcpu *vcpu) | |||
1705 | } | 1109 | } |
1706 | EXPORT_SYMBOL_GPL(kvm_resched); | 1110 | EXPORT_SYMBOL_GPL(kvm_resched); |
1707 | 1111 | ||
1112 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu) | ||
1113 | { | ||
1114 | ktime_t expires; | ||
1115 | DEFINE_WAIT(wait); | ||
1116 | |||
1117 | prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); | ||
1118 | |||
1119 | /* Sleep for 100 us, and hope lock-holder got scheduled */ | ||
1120 | expires = ktime_add_ns(ktime_get(), 100000UL); | ||
1121 | schedule_hrtimeout(&expires, HRTIMER_MODE_ABS); | ||
1122 | |||
1123 | finish_wait(&vcpu->wq, &wait); | ||
1124 | } | ||
1125 | EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); | ||
1126 | |||
1708 | static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1127 | static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
1709 | { | 1128 | { |
1710 | struct kvm_vcpu *vcpu = vma->vm_file->private_data; | 1129 | struct kvm_vcpu *vcpu = vma->vm_file->private_data; |
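
kvm_vcpu_on_spin() gives arch code a cheap way to back off when it detects a guest busy-waiting, for example on a PAUSE-loop intercept: rather than burning the remaining timeslice, the vcpu naps for roughly 100 us in the hope that the preempted lock holder gets to run. A hypothetical intercept handler would be as small as:

/* Hypothetical arch-side caller: nap briefly instead of spinning against
 * a lock held by a preempted sibling vcpu, then resume the guest. */
static int pause_intercept_sketch(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_on_spin(vcpu);
	return 1;	/* keep running the guest */
}
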
@@ -1828,88 +1247,6 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) | |||
1828 | return 0; | 1247 | return 0; |
1829 | } | 1248 | } |
1830 | 1249 | ||
1831 | #ifdef __KVM_HAVE_MSIX | ||
1832 | static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm, | ||
1833 | struct kvm_assigned_msix_nr *entry_nr) | ||
1834 | { | ||
1835 | int r = 0; | ||
1836 | struct kvm_assigned_dev_kernel *adev; | ||
1837 | |||
1838 | mutex_lock(&kvm->lock); | ||
1839 | |||
1840 | adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
1841 | entry_nr->assigned_dev_id); | ||
1842 | if (!adev) { | ||
1843 | r = -EINVAL; | ||
1844 | goto msix_nr_out; | ||
1845 | } | ||
1846 | |||
1847 | if (adev->entries_nr == 0) { | ||
1848 | adev->entries_nr = entry_nr->entry_nr; | ||
1849 | if (adev->entries_nr == 0 || | ||
1850 | adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) { | ||
1851 | r = -EINVAL; | ||
1852 | goto msix_nr_out; | ||
1853 | } | ||
1854 | |||
1855 | adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) * | ||
1856 | entry_nr->entry_nr, | ||
1857 | GFP_KERNEL); | ||
1858 | if (!adev->host_msix_entries) { | ||
1859 | r = -ENOMEM; | ||
1860 | goto msix_nr_out; | ||
1861 | } | ||
1862 | adev->guest_msix_entries = kzalloc( | ||
1863 | sizeof(struct kvm_guest_msix_entry) * | ||
1864 | entry_nr->entry_nr, GFP_KERNEL); | ||
1865 | if (!adev->guest_msix_entries) { | ||
1866 | kfree(adev->host_msix_entries); | ||
1867 | r = -ENOMEM; | ||
1868 | goto msix_nr_out; | ||
1869 | } | ||
1870 | } else /* Setting the MSI-X entry count twice is not allowed */ | ||
1871 | r = -EINVAL; | ||
1872 | msix_nr_out: | ||
1873 | mutex_unlock(&kvm->lock); | ||
1874 | return r; | ||
1875 | } | ||
1876 | |||
1877 | static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm, | ||
1878 | struct kvm_assigned_msix_entry *entry) | ||
1879 | { | ||
1880 | int r = 0, i; | ||
1881 | struct kvm_assigned_dev_kernel *adev; | ||
1882 | |||
1883 | mutex_lock(&kvm->lock); | ||
1884 | |||
1885 | adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | ||
1886 | entry->assigned_dev_id); | ||
1887 | |||
1888 | if (!adev) { | ||
1889 | r = -EINVAL; | ||
1890 | goto msix_entry_out; | ||
1891 | } | ||
1892 | |||
1893 | for (i = 0; i < adev->entries_nr; i++) | ||
1894 | if (adev->guest_msix_entries[i].vector == 0 || | ||
1895 | adev->guest_msix_entries[i].entry == entry->entry) { | ||
1896 | adev->guest_msix_entries[i].entry = entry->entry; | ||
1897 | adev->guest_msix_entries[i].vector = entry->gsi; | ||
1898 | adev->host_msix_entries[i].entry = entry->entry; | ||
1899 | break; | ||
1900 | } | ||
1901 | if (i == adev->entries_nr) { | ||
1902 | r = -ENOSPC; | ||
1903 | goto msix_entry_out; | ||
1904 | } | ||
1905 | |||
1906 | msix_entry_out: | ||
1907 | mutex_unlock(&kvm->lock); | ||
1908 | |||
1909 | return r; | ||
1910 | } | ||
1911 | #endif | ||
1912 | |||
1913 | static long kvm_vcpu_ioctl(struct file *filp, | 1250 | static long kvm_vcpu_ioctl(struct file *filp, |
1914 | unsigned int ioctl, unsigned long arg) | 1251 | unsigned int ioctl, unsigned long arg) |
1915 | { | 1252 | { |
@@ -2168,112 +1505,6 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2168 | break; | 1505 | break; |
2169 | } | 1506 | } |
2170 | #endif | 1507 | #endif |
2171 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | ||
2172 | case KVM_ASSIGN_PCI_DEVICE: { | ||
2173 | struct kvm_assigned_pci_dev assigned_dev; | ||
2174 | |||
2175 | r = -EFAULT; | ||
2176 | if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) | ||
2177 | goto out; | ||
2178 | r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev); | ||
2179 | if (r) | ||
2180 | goto out; | ||
2181 | break; | ||
2182 | } | ||
2183 | case KVM_ASSIGN_IRQ: { | ||
2184 | r = -EOPNOTSUPP; | ||
2185 | break; | ||
2186 | } | ||
2187 | #ifdef KVM_CAP_ASSIGN_DEV_IRQ | ||
2188 | case KVM_ASSIGN_DEV_IRQ: { | ||
2189 | struct kvm_assigned_irq assigned_irq; | ||
2190 | |||
2191 | r = -EFAULT; | ||
2192 | if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) | ||
2193 | goto out; | ||
2194 | r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq); | ||
2195 | if (r) | ||
2196 | goto out; | ||
2197 | break; | ||
2198 | } | ||
2199 | case KVM_DEASSIGN_DEV_IRQ: { | ||
2200 | struct kvm_assigned_irq assigned_irq; | ||
2201 | |||
2202 | r = -EFAULT; | ||
2203 | if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) | ||
2204 | goto out; | ||
2205 | r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq); | ||
2206 | if (r) | ||
2207 | goto out; | ||
2208 | break; | ||
2209 | } | ||
2210 | #endif | ||
2211 | #endif | ||
2212 | #ifdef KVM_CAP_DEVICE_DEASSIGNMENT | ||
2213 | case KVM_DEASSIGN_PCI_DEVICE: { | ||
2214 | struct kvm_assigned_pci_dev assigned_dev; | ||
2215 | |||
2216 | r = -EFAULT; | ||
2217 | if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) | ||
2218 | goto out; | ||
2219 | r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev); | ||
2220 | if (r) | ||
2221 | goto out; | ||
2222 | break; | ||
2223 | } | ||
2224 | #endif | ||
2225 | #ifdef KVM_CAP_IRQ_ROUTING | ||
2226 | case KVM_SET_GSI_ROUTING: { | ||
2227 | struct kvm_irq_routing routing; | ||
2228 | struct kvm_irq_routing __user *urouting; | ||
2229 | struct kvm_irq_routing_entry *entries; | ||
2230 | |||
2231 | r = -EFAULT; | ||
2232 | if (copy_from_user(&routing, argp, sizeof(routing))) | ||
2233 | goto out; | ||
2234 | r = -EINVAL; | ||
2235 | if (routing.nr >= KVM_MAX_IRQ_ROUTES) | ||
2236 | goto out; | ||
2237 | if (routing.flags) | ||
2238 | goto out; | ||
2239 | r = -ENOMEM; | ||
2240 | entries = vmalloc(routing.nr * sizeof(*entries)); | ||
2241 | if (!entries) | ||
2242 | goto out; | ||
2243 | r = -EFAULT; | ||
2244 | urouting = argp; | ||
2245 | if (copy_from_user(entries, urouting->entries, | ||
2246 | routing.nr * sizeof(*entries))) | ||
2247 | goto out_free_irq_routing; | ||
2248 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | ||
2249 | routing.flags); | ||
2250 | out_free_irq_routing: | ||
2251 | vfree(entries); | ||
2252 | break; | ||
2253 | } | ||
2254 | #endif /* KVM_CAP_IRQ_ROUTING */ | ||
2255 | #ifdef __KVM_HAVE_MSIX | ||
2256 | case KVM_ASSIGN_SET_MSIX_NR: { | ||
2257 | struct kvm_assigned_msix_nr entry_nr; | ||
2258 | r = -EFAULT; | ||
2259 | if (copy_from_user(&entry_nr, argp, sizeof entry_nr)) | ||
2260 | goto out; | ||
2261 | r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr); | ||
2262 | if (r) | ||
2263 | goto out; | ||
2264 | break; | ||
2265 | } | ||
2266 | case KVM_ASSIGN_SET_MSIX_ENTRY: { | ||
2267 | struct kvm_assigned_msix_entry entry; | ||
2268 | r = -EFAULT; | ||
2269 | if (copy_from_user(&entry, argp, sizeof entry)) | ||
2270 | goto out; | ||
2271 | r = kvm_vm_ioctl_set_msix_entry(kvm, &entry); | ||
2272 | if (r) | ||
2273 | goto out; | ||
2274 | break; | ||
2275 | } | ||
2276 | #endif | ||
2277 | case KVM_IRQFD: { | 1508 | case KVM_IRQFD: { |
2278 | struct kvm_irqfd data; | 1509 | struct kvm_irqfd data; |
2279 | 1510 | ||
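
The assigned-device and MSI-X ioctls deleted above are not gone; they move behind a single dispatcher in virt/kvm/assigned-dev.c, reached through the -ENOTTY fall-through added to kvm_vm_ioctl() in the next hunk. A sketch of its shape, with one case spelled out the same way the removed KVM_ASSIGN_PCI_DEVICE handler did it (the remaining cases are elided):

/* Sketch of the single entry point the fall-through below dispatches to;
 * the case bodies mirror the handlers deleted from kvm_vm_ioctl() above. */
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			break;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		break;
	}
	/* KVM_ASSIGN_DEV_IRQ, KVM_DEASSIGN_DEV_IRQ, KVM_DEASSIGN_PCI_DEVICE,
	 * KVM_ASSIGN_SET_MSIX_NR, KVM_ASSIGN_SET_MSIX_ENTRY, ... are handled
	 * the same way, calling the helpers that moved to assigned-dev.c. */
	default:
		r = -ENOTTY;
		break;
	}
	return r;
}
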
@@ -2305,11 +1536,59 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2305 | #endif | 1536 | #endif |
2306 | default: | 1537 | default: |
2307 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); | 1538 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); |
1539 | if (r == -ENOTTY) | ||
1540 | r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg); | ||
2308 | } | 1541 | } |
2309 | out: | 1542 | out: |
2310 | return r; | 1543 | return r; |
2311 | } | 1544 | } |
2312 | 1545 | ||
1546 | #ifdef CONFIG_COMPAT | ||
1547 | struct compat_kvm_dirty_log { | ||
1548 | __u32 slot; | ||
1549 | __u32 padding1; | ||
1550 | union { | ||
1551 | compat_uptr_t dirty_bitmap; /* one bit per page */ | ||
1552 | __u64 padding2; | ||
1553 | }; | ||
1554 | }; | ||
1555 | |||
1556 | static long kvm_vm_compat_ioctl(struct file *filp, | ||
1557 | unsigned int ioctl, unsigned long arg) | ||
1558 | { | ||
1559 | struct kvm *kvm = filp->private_data; | ||
1560 | int r; | ||
1561 | |||
1562 | if (kvm->mm != current->mm) | ||
1563 | return -EIO; | ||
1564 | switch (ioctl) { | ||
1565 | case KVM_GET_DIRTY_LOG: { | ||
1566 | struct compat_kvm_dirty_log compat_log; | ||
1567 | struct kvm_dirty_log log; | ||
1568 | |||
1569 | r = -EFAULT; | ||
1570 | if (copy_from_user(&compat_log, (void __user *)arg, | ||
1571 | sizeof(compat_log))) | ||
1572 | goto out; | ||
1573 | log.slot = compat_log.slot; | ||
1574 | log.padding1 = compat_log.padding1; | ||
1575 | log.padding2 = compat_log.padding2; | ||
1576 | log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); | ||
1577 | |||
1578 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); | ||
1579 | if (r) | ||
1580 | goto out; | ||
1581 | break; | ||
1582 | } | ||
1583 | default: | ||
1584 | r = kvm_vm_ioctl(filp, ioctl, arg); | ||
1585 | } | ||
1586 | |||
1587 | out: | ||
1588 | return r; | ||
1589 | } | ||
1590 | #endif | ||
1591 | |||
2313 | static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1592 | static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
2314 | { | 1593 | { |
2315 | struct page *page[1]; | 1594 | struct page *page[1]; |
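
The compat path exists because a 32-bit task lays out struct kvm_dirty_log with a 32-bit dirty_bitmap pointer; the union's __u64 padding keeps both layouts the same overall size, so only the pointer field needs widening with compat_ptr(). A self-contained userspace illustration of the two layouts (struct names here are illustrative, not taken from kernel headers):

#include <stdint.h>
#include <stdio.h>

/* What a 32-bit caller hands in (mirrors compat_kvm_dirty_log above). */
struct dirty_log_compat {
	uint32_t slot;
	uint32_t padding1;
	union {
		uint32_t dirty_bitmap;	/* compat_uptr_t: only 4 bytes are valid */
		uint64_t padding2;
	};
};

/* What the native 64-bit handler expects (mirrors struct kvm_dirty_log). */
struct dirty_log_native {
	uint32_t slot;
	uint32_t padding1;
	union {
		void *dirty_bitmap;	/* full 64-bit user pointer */
		uint64_t padding2;
	};
};

int main(void)
{
	/* Same overall size, so the structure can be copied either way...   */
	/* ...but the pointer member is narrower, which is why the handler   */
	/* rebuilds dirty_bitmap with compat_ptr() rather than casting it.   */
	printf("compat: %zu bytes (dirty_bitmap %zu), native: %zu bytes (dirty_bitmap %zu)\n",
	       sizeof(struct dirty_log_compat),
	       sizeof(((struct dirty_log_compat *)0)->dirty_bitmap),
	       sizeof(struct dirty_log_native),
	       sizeof(((struct dirty_log_native *)0)->dirty_bitmap));
	return 0;
}
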
@@ -2344,7 +1623,9 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma) | |||
2344 | static struct file_operations kvm_vm_fops = { | 1623 | static struct file_operations kvm_vm_fops = { |
2345 | .release = kvm_vm_release, | 1624 | .release = kvm_vm_release, |
2346 | .unlocked_ioctl = kvm_vm_ioctl, | 1625 | .unlocked_ioctl = kvm_vm_ioctl, |
2347 | .compat_ioctl = kvm_vm_ioctl, | 1626 | #ifdef CONFIG_COMPAT |
1627 | .compat_ioctl = kvm_vm_compat_ioctl, | ||
1628 | #endif | ||
2348 | .mmap = kvm_vm_mmap, | 1629 | .mmap = kvm_vm_mmap, |
2349 | }; | 1630 | }; |
2350 | 1631 | ||
@@ -2372,6 +1653,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg) | |||
2372 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE | 1653 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE |
2373 | case KVM_CAP_SET_BOOT_CPU_ID: | 1654 | case KVM_CAP_SET_BOOT_CPU_ID: |
2374 | #endif | 1655 | #endif |
1656 | case KVM_CAP_INTERNAL_ERROR_DATA: | ||
2375 | return 1; | 1657 | return 1; |
2376 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 1658 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
2377 | case KVM_CAP_IRQ_ROUTING: | 1659 | case KVM_CAP_IRQ_ROUTING: |
@@ -2442,11 +1724,21 @@ static struct miscdevice kvm_dev = { | |||
2442 | static void hardware_enable(void *junk) | 1724 | static void hardware_enable(void *junk) |
2443 | { | 1725 | { |
2444 | int cpu = raw_smp_processor_id(); | 1726 | int cpu = raw_smp_processor_id(); |
1727 | int r; | ||
2445 | 1728 | ||
2446 | if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) | 1729 | if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
2447 | return; | 1730 | return; |
1731 | |||
2448 | cpumask_set_cpu(cpu, cpus_hardware_enabled); | 1732 | cpumask_set_cpu(cpu, cpus_hardware_enabled); |
2449 | kvm_arch_hardware_enable(NULL); | 1733 | |
1734 | r = kvm_arch_hardware_enable(NULL); | ||
1735 | |||
1736 | if (r) { | ||
1737 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); | ||
1738 | atomic_inc(&hardware_enable_failed); | ||
1739 | printk(KERN_INFO "kvm: enabling virtualization on " | ||
1740 | "CPU%d failed\n", cpu); | ||
1741 | } | ||
2450 | } | 1742 | } |
2451 | 1743 | ||
2452 | static void hardware_disable(void *junk) | 1744 | static void hardware_disable(void *junk) |
@@ -2459,11 +1751,52 @@ static void hardware_disable(void *junk) | |||
2459 | kvm_arch_hardware_disable(NULL); | 1751 | kvm_arch_hardware_disable(NULL); |
2460 | } | 1752 | } |
2461 | 1753 | ||
1754 | static void hardware_disable_all_nolock(void) | ||
1755 | { | ||
1756 | BUG_ON(!kvm_usage_count); | ||
1757 | |||
1758 | kvm_usage_count--; | ||
1759 | if (!kvm_usage_count) | ||
1760 | on_each_cpu(hardware_disable, NULL, 1); | ||
1761 | } | ||
1762 | |||
1763 | static void hardware_disable_all(void) | ||
1764 | { | ||
1765 | spin_lock(&kvm_lock); | ||
1766 | hardware_disable_all_nolock(); | ||
1767 | spin_unlock(&kvm_lock); | ||
1768 | } | ||
1769 | |||
1770 | static int hardware_enable_all(void) | ||
1771 | { | ||
1772 | int r = 0; | ||
1773 | |||
1774 | spin_lock(&kvm_lock); | ||
1775 | |||
1776 | kvm_usage_count++; | ||
1777 | if (kvm_usage_count == 1) { | ||
1778 | atomic_set(&hardware_enable_failed, 0); | ||
1779 | on_each_cpu(hardware_enable, NULL, 1); | ||
1780 | |||
1781 | if (atomic_read(&hardware_enable_failed)) { | ||
1782 | hardware_disable_all_nolock(); | ||
1783 | r = -EBUSY; | ||
1784 | } | ||
1785 | } | ||
1786 | |||
1787 | spin_unlock(&kvm_lock); | ||
1788 | |||
1789 | return r; | ||
1790 | } | ||
1791 | |||
2462 | static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | 1792 | static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, |
2463 | void *v) | 1793 | void *v) |
2464 | { | 1794 | { |
2465 | int cpu = (long)v; | 1795 | int cpu = (long)v; |
2466 | 1796 | ||
1797 | if (!kvm_usage_count) | ||
1798 | return NOTIFY_OK; | ||
1799 | |||
2467 | val &= ~CPU_TASKS_FROZEN; | 1800 | val &= ~CPU_TASKS_FROZEN; |
2468 | switch (val) { | 1801 | switch (val) { |
2469 | case CPU_DYING: | 1802 | case CPU_DYING: |
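
With hardware_enable_all()/hardware_disable_all(), per-CPU virtualization support becomes a usage-counted resource: the first VM created enables it on every CPU, the last VM destroyed disables it, and the CPU-hotplug and suspend/resume paths now leave the hardware alone while kvm_usage_count is zero. A self-contained userspace model of that first-user/last-user pattern, with a pthread mutex standing in for kvm_lock (the real code additionally rolls back with -EBUSY if any CPU fails to enable):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for kvm_lock */
static int usage_count;						/* stands in for kvm_usage_count */

static void enable_everywhere(void)  { puts("virt extensions enabled");  }
static void disable_everywhere(void) { puts("virt extensions disabled"); }

/* First user switches the hardware on; later users only bump the count. */
static void enable_all(void)
{
	pthread_mutex_lock(&lock);
	if (++usage_count == 1)
		enable_everywhere();		/* on_each_cpu(hardware_enable, ...) */
	pthread_mutex_unlock(&lock);
}

/* Last user switches it back off. */
static void disable_all(void)
{
	pthread_mutex_lock(&lock);
	if (--usage_count == 0)
		disable_everywhere();		/* on_each_cpu(hardware_disable, ...) */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	enable_all();	/* first VM created  -> "virt extensions enabled"  */
	enable_all();	/* second VM created -> count only                 */
	disable_all();	/* second VM gone    -> count only                 */
	disable_all();	/* last VM gone      -> "virt extensions disabled" */
	return 0;
}
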
@@ -2666,13 +1999,15 @@ static void kvm_exit_debug(void) | |||
2666 | 1999 | ||
2667 | static int kvm_suspend(struct sys_device *dev, pm_message_t state) | 2000 | static int kvm_suspend(struct sys_device *dev, pm_message_t state) |
2668 | { | 2001 | { |
2669 | hardware_disable(NULL); | 2002 | if (kvm_usage_count) |
2003 | hardware_disable(NULL); | ||
2670 | return 0; | 2004 | return 0; |
2671 | } | 2005 | } |
2672 | 2006 | ||
2673 | static int kvm_resume(struct sys_device *dev) | 2007 | static int kvm_resume(struct sys_device *dev) |
2674 | { | 2008 | { |
2675 | hardware_enable(NULL); | 2009 | if (kvm_usage_count) |
2010 | hardware_enable(NULL); | ||
2676 | return 0; | 2011 | return 0; |
2677 | } | 2012 | } |
2678 | 2013 | ||
@@ -2747,7 +2082,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size, | |||
2747 | goto out_free_1; | 2082 | goto out_free_1; |
2748 | } | 2083 | } |
2749 | 2084 | ||
2750 | on_each_cpu(hardware_enable, NULL, 1); | ||
2751 | r = register_cpu_notifier(&kvm_cpu_notifier); | 2085 | r = register_cpu_notifier(&kvm_cpu_notifier); |
2752 | if (r) | 2086 | if (r) |
2753 | goto out_free_2; | 2087 | goto out_free_2; |
@@ -2797,7 +2131,6 @@ out_free_3: | |||
2797 | unregister_reboot_notifier(&kvm_reboot_notifier); | 2131 | unregister_reboot_notifier(&kvm_reboot_notifier); |
2798 | unregister_cpu_notifier(&kvm_cpu_notifier); | 2132 | unregister_cpu_notifier(&kvm_cpu_notifier); |
2799 | out_free_2: | 2133 | out_free_2: |
2800 | on_each_cpu(hardware_disable, NULL, 1); | ||
2801 | out_free_1: | 2134 | out_free_1: |
2802 | kvm_arch_hardware_unsetup(); | 2135 | kvm_arch_hardware_unsetup(); |
2803 | out_free_0a: | 2136 | out_free_0a: |