author		Avi Kivity <avi@redhat.com>	2009-08-26 07:57:50 -0400
committer	Avi Kivity <avi@redhat.com>	2009-12-03 02:32:09 -0500
commit		bfd99ff5d483b11c32bca49fbff7a5ac59038b0a (patch)
tree		e48d9dbf89f6f3940133f7b80559740152af3f7b
parent		367e1319b229110a27c53221c2fa32a6aa86d4a9 (diff)
KVM: Move assigned device code to own file
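Device assignment is self-contained, so move it out of kvm_main.c (where it
lived under #ifdef KVM_CAP_DEVICE_ASSIGNMENT) into its own file,
virt/kvm/assigned-dev.c, link that file on x86 and ia64, and funnel all of
its ioctls through a single entry point, kvm_vm_ioctl_assigned_device().
kvm_vm_ioctl() now tries the arch handler first and falls back to the
assigned-device handler only on -ENOTTY; when device assignment is compiled
out, an inline stub in kvm_host.h keeps that fallback returning -ENOTTY.
Condensed from the hunks below, the resulting tail of kvm_vm_ioctl() is:

	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		/* Unhandled by the arch: give the assigned-device code a
		 * chance; it returns -ENOTTY for ioctls it does not own. */
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);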
Signed-off-by: Avi Kivity <avi@redhat.com>
 arch/ia64/kvm/Makefile   |   2 +-
 arch/x86/kvm/Makefile    |   3 ++-
 include/linux/kvm_host.h |  17 ++
 virt/kvm/assigned-dev.c  | 818 ++++++++++++++++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      | 798 ----------------------------------------------
 5 files changed, 840 insertions(+), 798 deletions(-)
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 0bb99b732908..1089b3e918ac 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o irq_comm.o)
+		coalesced_mmio.o irq_comm.o assigned-dev.o)
 
 ifeq ($(CONFIG_IOMMU_API),y)
 common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 0e7fe78d0f74..31a7035c4bd9 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -6,7 +6,8 @@ CFLAGS_svm.o := -I.
 CFLAGS_vmx.o := -I.
 
 kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-	coalesced_mmio.o irq_comm.o eventfd.o)
+	coalesced_mmio.o irq_comm.o eventfd.o \
+	assigned-dev.o)
 kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
 
 kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4aa5e1d9a797..c0a1cc35f080 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -577,4 +577,21 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
 }
 #endif
+
+#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+
+long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+				  unsigned long arg);
+
+#else
+
+static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+						unsigned long arg)
+{
+	return -ENOTTY;
+}
+
 #endif
+
+#endif
+
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
new file mode 100644
index 000000000000..fd9c097b760a
--- /dev/null
+++ b/virt/kvm/assigned-dev.c
@@ -0,0 +1,818 @@
+/*
+ * Kernel-based Virtual Machine - device assignment support
+ *
+ * Copyright (C) 2006-9 Red Hat, Inc
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "irq.h"
+
+static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
+						      int assigned_dev_id)
+{
+	struct list_head *ptr;
+	struct kvm_assigned_dev_kernel *match;
+
+	list_for_each(ptr, head) {
+		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
+		if (match->assigned_dev_id == assigned_dev_id)
+			return match;
+	}
+	return NULL;
+}
+
+static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
+				    *assigned_dev, int irq)
+{
+	int i, index;
+	struct msix_entry *host_msix_entries;
+
+	host_msix_entries = assigned_dev->host_msix_entries;
+
+	index = -1;
+	for (i = 0; i < assigned_dev->entries_nr; i++)
+		if (irq == host_msix_entries[i].vector) {
+			index = i;
+			break;
+		}
+	if (index < 0) {
+		printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
+		return 0;
+	}
+
+	return index;
+}
+
+static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+{
+	struct kvm_assigned_dev_kernel *assigned_dev;
+	struct kvm *kvm;
+	int i;
+
+	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
+				    interrupt_work);
+	kvm = assigned_dev->kvm;
+
+	spin_lock_irq(&assigned_dev->assigned_dev_lock);
+	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
+		struct kvm_guest_msix_entry *guest_entries =
+			assigned_dev->guest_msix_entries;
+		for (i = 0; i < assigned_dev->entries_nr; i++) {
+			if (!(guest_entries[i].flags &
+					KVM_ASSIGNED_MSIX_PENDING))
+				continue;
+			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
+			kvm_set_irq(assigned_dev->kvm,
+				    assigned_dev->irq_source_id,
+				    guest_entries[i].vector, 1);
+		}
+	} else
+		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+			    assigned_dev->guest_irq, 1);
+
+	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
+}
+
+static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
+{
+	unsigned long flags;
+	struct kvm_assigned_dev_kernel *assigned_dev =
+		(struct kvm_assigned_dev_kernel *) dev_id;
+
+	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
+	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
+		int index = find_index_from_host_irq(assigned_dev, irq);
+		if (index < 0)
+			goto out;
+		assigned_dev->guest_msix_entries[index].flags |=
+			KVM_ASSIGNED_MSIX_PENDING;
+	}
+
+	schedule_work(&assigned_dev->interrupt_work);
+
+	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
+		disable_irq_nosync(irq);
+		assigned_dev->host_irq_disabled = true;
+	}
+
+out:
+	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
+	return IRQ_HANDLED;
+}
+
+/* Ack the irq line for an assigned device */
+static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
+{
+	struct kvm_assigned_dev_kernel *dev;
+	unsigned long flags;
+
+	if (kian->gsi == -1)
+		return;
+
+	dev = container_of(kian, struct kvm_assigned_dev_kernel,
+			   ack_notifier);
+
+	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
+
+	/* The guest irq may be shared so this ack may be
+	 * from another device.
+	 */
+	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
+	if (dev->host_irq_disabled) {
+		enable_irq(dev->host_irq);
+		dev->host_irq_disabled = false;
+	}
+	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
+}
+
+static void deassign_guest_irq(struct kvm *kvm,
+			       struct kvm_assigned_dev_kernel *assigned_dev)
+{
+	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
+	assigned_dev->ack_notifier.gsi = -1;
+
+	if (assigned_dev->irq_source_id != -1)
+		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
+	assigned_dev->irq_source_id = -1;
+	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
+}
+
+/* The function implicit hold kvm->lock mutex due to cancel_work_sync() */
+static void deassign_host_irq(struct kvm *kvm,
+			      struct kvm_assigned_dev_kernel *assigned_dev)
+{
+	/*
+	 * In kvm_free_device_irq, cancel_work_sync return true if:
+	 * 1. work is scheduled, and then cancelled.
+	 * 2. work callback is executed.
+	 *
+	 * The first one ensured that the irq is disabled and no more events
+	 * would happen. But for the second one, the irq may be enabled (e.g.
+	 * for MSI). So we disable irq here to prevent further events.
+	 *
+	 * Notice this maybe result in nested disable if the interrupt type is
+	 * INTx, but it's OK for we are going to free it.
+	 *
+	 * If this function is a part of VM destroy, please ensure that till
+	 * now, the kvm state is still legal for probably we also have to wait
+	 * interrupt_work done.
+	 */
+	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
+		int i;
+		for (i = 0; i < assigned_dev->entries_nr; i++)
+			disable_irq_nosync(assigned_dev->
+					   host_msix_entries[i].vector);
+
+		cancel_work_sync(&assigned_dev->interrupt_work);
+
+		for (i = 0; i < assigned_dev->entries_nr; i++)
+			free_irq(assigned_dev->host_msix_entries[i].vector,
+				 (void *)assigned_dev);
+
+		assigned_dev->entries_nr = 0;
+		kfree(assigned_dev->host_msix_entries);
+		kfree(assigned_dev->guest_msix_entries);
+		pci_disable_msix(assigned_dev->dev);
+	} else {
+		/* Deal with MSI and INTx */
+		disable_irq_nosync(assigned_dev->host_irq);
+		cancel_work_sync(&assigned_dev->interrupt_work);
+
+		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
+			pci_disable_msi(assigned_dev->dev);
+	}
+
+	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
+}
+
+static int kvm_deassign_irq(struct kvm *kvm,
+			    struct kvm_assigned_dev_kernel *assigned_dev,
+			    unsigned long irq_requested_type)
+{
+	unsigned long guest_irq_type, host_irq_type;
+
+	if (!irqchip_in_kernel(kvm))
+		return -EINVAL;
+	/* no irq assignment to deassign */
+	if (!assigned_dev->irq_requested_type)
+		return -ENXIO;
+
+	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
+	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
+
+	if (host_irq_type)
+		deassign_host_irq(kvm, assigned_dev);
+	if (guest_irq_type)
+		deassign_guest_irq(kvm, assigned_dev);
+
+	return 0;
+}
+
+static void kvm_free_assigned_irq(struct kvm *kvm,
+				  struct kvm_assigned_dev_kernel *assigned_dev)
+{
+	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
+}
+
+static void kvm_free_assigned_device(struct kvm *kvm,
+				     struct kvm_assigned_dev_kernel
+				     *assigned_dev)
+{
+	kvm_free_assigned_irq(kvm, assigned_dev);
+
+	pci_reset_function(assigned_dev->dev);
+
+	pci_release_regions(assigned_dev->dev);
+	pci_disable_device(assigned_dev->dev);
+	pci_dev_put(assigned_dev->dev);
+
+	list_del(&assigned_dev->list);
+	kfree(assigned_dev);
+}
+
+void kvm_free_all_assigned_devices(struct kvm *kvm)
+{
+	struct list_head *ptr, *ptr2;
+	struct kvm_assigned_dev_kernel *assigned_dev;
+
+	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
+		assigned_dev = list_entry(ptr,
+					  struct kvm_assigned_dev_kernel,
+					  list);
+
+		kvm_free_assigned_device(kvm, assigned_dev);
+	}
+}
+
+static int assigned_device_enable_host_intx(struct kvm *kvm,
+					    struct kvm_assigned_dev_kernel *dev)
+{
+	dev->host_irq = dev->dev->irq;
+	/* Even though this is PCI, we don't want to use shared
+	 * interrupts. Sharing host devices with guest-assigned devices
+	 * on the same interrupt line is not a happy situation: there
+	 * are going to be long delays in accepting, acking, etc.
+	 */
+	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
+			0, "kvm_assigned_intx_device", (void *)dev))
+		return -EIO;
+	return 0;
+}
+
+#ifdef __KVM_HAVE_MSI
+static int assigned_device_enable_host_msi(struct kvm *kvm,
+					   struct kvm_assigned_dev_kernel *dev)
+{
+	int r;
+
+	if (!dev->dev->msi_enabled) {
+		r = pci_enable_msi(dev->dev);
+		if (r)
+			return r;
+	}
+
+	dev->host_irq = dev->dev->irq;
+	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
+			"kvm_assigned_msi_device", (void *)dev)) {
+		pci_disable_msi(dev->dev);
+		return -EIO;
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef __KVM_HAVE_MSIX
+static int assigned_device_enable_host_msix(struct kvm *kvm,
+					    struct kvm_assigned_dev_kernel *dev)
+{
+	int i, r = -EINVAL;
+
+	/* host_msix_entries and guest_msix_entries should have been
+	 * initialized */
+	if (dev->entries_nr == 0)
+		return r;
+
+	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+	if (r)
+		return r;
+
+	for (i = 0; i < dev->entries_nr; i++) {
+		r = request_irq(dev->host_msix_entries[i].vector,
+				kvm_assigned_dev_intr, 0,
+				"kvm_assigned_msix_device",
+				(void *)dev);
+		/* FIXME: free requested_irq's on failure */
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+#endif
+
+static int assigned_device_enable_guest_intx(struct kvm *kvm,
+				struct kvm_assigned_dev_kernel *dev,
+				struct kvm_assigned_irq *irq)
+{
+	dev->guest_irq = irq->guest_irq;
+	dev->ack_notifier.gsi = irq->guest_irq;
+	return 0;
+}
+
+#ifdef __KVM_HAVE_MSI
+static int assigned_device_enable_guest_msi(struct kvm *kvm,
+			struct kvm_assigned_dev_kernel *dev,
+			struct kvm_assigned_irq *irq)
+{
+	dev->guest_irq = irq->guest_irq;
+	dev->ack_notifier.gsi = -1;
+	dev->host_irq_disabled = false;
+	return 0;
+}
+#endif
+
+#ifdef __KVM_HAVE_MSIX
+static int assigned_device_enable_guest_msix(struct kvm *kvm,
+			struct kvm_assigned_dev_kernel *dev,
+			struct kvm_assigned_irq *irq)
+{
+	dev->guest_irq = irq->guest_irq;
+	dev->ack_notifier.gsi = -1;
+	dev->host_irq_disabled = false;
+	return 0;
+}
+#endif
+
+static int assign_host_irq(struct kvm *kvm,
+			   struct kvm_assigned_dev_kernel *dev,
+			   __u32 host_irq_type)
+{
+	int r = -EEXIST;
+
+	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
+		return r;
+
+	switch (host_irq_type) {
+	case KVM_DEV_IRQ_HOST_INTX:
+		r = assigned_device_enable_host_intx(kvm, dev);
+		break;
+#ifdef __KVM_HAVE_MSI
+	case KVM_DEV_IRQ_HOST_MSI:
+		r = assigned_device_enable_host_msi(kvm, dev);
+		break;
+#endif
+#ifdef __KVM_HAVE_MSIX
+	case KVM_DEV_IRQ_HOST_MSIX:
+		r = assigned_device_enable_host_msix(kvm, dev);
+		break;
+#endif
+	default:
+		r = -EINVAL;
+	}
+
+	if (!r)
+		dev->irq_requested_type |= host_irq_type;
+
+	return r;
+}
+
+static int assign_guest_irq(struct kvm *kvm,
+			    struct kvm_assigned_dev_kernel *dev,
+			    struct kvm_assigned_irq *irq,
+			    unsigned long guest_irq_type)
+{
+	int id;
+	int r = -EEXIST;
+
+	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
+		return r;
+
+	id = kvm_request_irq_source_id(kvm);
+	if (id < 0)
+		return id;
+
+	dev->irq_source_id = id;
+
+	switch (guest_irq_type) {
+	case KVM_DEV_IRQ_GUEST_INTX:
+		r = assigned_device_enable_guest_intx(kvm, dev, irq);
+		break;
+#ifdef __KVM_HAVE_MSI
+	case KVM_DEV_IRQ_GUEST_MSI:
+		r = assigned_device_enable_guest_msi(kvm, dev, irq);
+		break;
+#endif
+#ifdef __KVM_HAVE_MSIX
+	case KVM_DEV_IRQ_GUEST_MSIX:
+		r = assigned_device_enable_guest_msix(kvm, dev, irq);
+		break;
+#endif
+	default:
+		r = -EINVAL;
+	}
+
+	if (!r) {
+		dev->irq_requested_type |= guest_irq_type;
+		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
+	} else
+		kvm_free_irq_source_id(kvm, dev->irq_source_id);
+
+	return r;
+}
+
+/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
+static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
+				   struct kvm_assigned_irq *assigned_irq)
+{
+	int r = -EINVAL;
+	struct kvm_assigned_dev_kernel *match;
+	unsigned long host_irq_type, guest_irq_type;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	if (!irqchip_in_kernel(kvm))
+		return r;
+
+	mutex_lock(&kvm->lock);
+	r = -ENODEV;
+	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				      assigned_irq->assigned_dev_id);
+	if (!match)
+		goto out;
+
+	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
+	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
+
+	r = -EINVAL;
+	/* can only assign one type at a time */
+	if (hweight_long(host_irq_type) > 1)
+		goto out;
+	if (hweight_long(guest_irq_type) > 1)
+		goto out;
+	if (host_irq_type == 0 && guest_irq_type == 0)
+		goto out;
+
+	r = 0;
+	if (host_irq_type)
+		r = assign_host_irq(kvm, match, host_irq_type);
+	if (r)
+		goto out;
+
+	if (guest_irq_type)
+		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
+					 struct kvm_assigned_irq
+					 *assigned_irq)
+{
+	int r = -ENODEV;
+	struct kvm_assigned_dev_kernel *match;
+
+	mutex_lock(&kvm->lock);
+
+	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				      assigned_irq->assigned_dev_id);
+	if (!match)
+		goto out;
+
+	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+				      struct kvm_assigned_pci_dev *assigned_dev)
+{
+	int r = 0;
+	struct kvm_assigned_dev_kernel *match;
+	struct pci_dev *dev;
+
+	down_read(&kvm->slots_lock);
+	mutex_lock(&kvm->lock);
+
+	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				      assigned_dev->assigned_dev_id);
+	if (match) {
+		/* device already assigned */
+		r = -EEXIST;
+		goto out;
+	}
+
+	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
+	if (match == NULL) {
+		printk(KERN_INFO "%s: Couldn't allocate memory\n",
+		       __func__);
+		r = -ENOMEM;
+		goto out;
+	}
+	dev = pci_get_bus_and_slot(assigned_dev->busnr,
+				   assigned_dev->devfn);
+	if (!dev) {
+		printk(KERN_INFO "%s: host device not found\n", __func__);
+		r = -EINVAL;
+		goto out_free;
+	}
+	if (pci_enable_device(dev)) {
+		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
+		r = -EBUSY;
+		goto out_put;
+	}
+	r = pci_request_regions(dev, "kvm_assigned_device");
+	if (r) {
+		printk(KERN_INFO "%s: Could not get access to device regions\n",
+		       __func__);
+		goto out_disable;
+	}
+
+	pci_reset_function(dev);
+
+	match->assigned_dev_id = assigned_dev->assigned_dev_id;
+	match->host_busnr = assigned_dev->busnr;
+	match->host_devfn = assigned_dev->devfn;
+	match->flags = assigned_dev->flags;
+	match->dev = dev;
+	spin_lock_init(&match->assigned_dev_lock);
+	match->irq_source_id = -1;
+	match->kvm = kvm;
+	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
+	INIT_WORK(&match->interrupt_work,
+		  kvm_assigned_dev_interrupt_work_handler);
+
+	list_add(&match->list, &kvm->arch.assigned_dev_head);
+
+	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
+		if (!kvm->arch.iommu_domain) {
+			r = kvm_iommu_map_guest(kvm);
+			if (r)
+				goto out_list_del;
+		}
+		r = kvm_assign_device(kvm, match);
+		if (r)
+			goto out_list_del;
+	}
+
+out:
+	mutex_unlock(&kvm->lock);
+	up_read(&kvm->slots_lock);
+	return r;
+out_list_del:
+	list_del(&match->list);
+	pci_release_regions(dev);
+out_disable:
+	pci_disable_device(dev);
+out_put:
+	pci_dev_put(dev);
+out_free:
+	kfree(match);
+	mutex_unlock(&kvm->lock);
+	up_read(&kvm->slots_lock);
+	return r;
+}
+
+static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
+		struct kvm_assigned_pci_dev *assigned_dev)
+{
+	int r = 0;
+	struct kvm_assigned_dev_kernel *match;
+
+	mutex_lock(&kvm->lock);
+
+	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				      assigned_dev->assigned_dev_id);
+	if (!match) {
+		printk(KERN_INFO "%s: device hasn't been assigned before, "
+		       "so cannot be deassigned\n", __func__);
+		r = -EINVAL;
+		goto out;
+	}
+
+	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
+		kvm_deassign_device(kvm, match);
+
+	kvm_free_assigned_device(kvm, match);
+
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+
+#ifdef __KVM_HAVE_MSIX
+static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
+				    struct kvm_assigned_msix_nr *entry_nr)
+{
+	int r = 0;
+	struct kvm_assigned_dev_kernel *adev;
+
+	mutex_lock(&kvm->lock);
+
+	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				     entry_nr->assigned_dev_id);
+	if (!adev) {
+		r = -EINVAL;
+		goto msix_nr_out;
+	}
+
+	if (adev->entries_nr == 0) {
+		adev->entries_nr = entry_nr->entry_nr;
+		if (adev->entries_nr == 0 ||
+		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
+			r = -EINVAL;
+			goto msix_nr_out;
+		}
+
+		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
+						  entry_nr->entry_nr,
+						  GFP_KERNEL);
+		if (!adev->host_msix_entries) {
+			r = -ENOMEM;
+			goto msix_nr_out;
+		}
+		adev->guest_msix_entries = kzalloc(
+				sizeof(struct kvm_guest_msix_entry) *
+				entry_nr->entry_nr, GFP_KERNEL);
+		if (!adev->guest_msix_entries) {
+			kfree(adev->host_msix_entries);
+			r = -ENOMEM;
+			goto msix_nr_out;
+		}
+	} else /* Not allowed set MSI-X number twice */
+		r = -EINVAL;
+msix_nr_out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
+				       struct kvm_assigned_msix_entry *entry)
+{
+	int r = 0, i;
+	struct kvm_assigned_dev_kernel *adev;
+
+	mutex_lock(&kvm->lock);
+
+	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				     entry->assigned_dev_id);
+
+	if (!adev) {
+		r = -EINVAL;
+		goto msix_entry_out;
+	}
+
+	for (i = 0; i < adev->entries_nr; i++)
+		if (adev->guest_msix_entries[i].vector == 0 ||
+		    adev->guest_msix_entries[i].entry == entry->entry) {
+			adev->guest_msix_entries[i].entry = entry->entry;
+			adev->guest_msix_entries[i].vector = entry->gsi;
+			adev->host_msix_entries[i].entry = entry->entry;
+			break;
+		}
+	if (i == adev->entries_nr) {
+		r = -ENOSPC;
+		goto msix_entry_out;
+	}
+
+msix_entry_out:
+	mutex_unlock(&kvm->lock);
+
+	return r;
+}
+#endif
+
+long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+				  unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int r = -ENOTTY;
+
+	switch (ioctl) {
+	case KVM_ASSIGN_PCI_DEVICE: {
+		struct kvm_assigned_pci_dev assigned_dev;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+			goto out;
+		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
+		if (r)
+			goto out;
+		break;
+	}
+	case KVM_ASSIGN_IRQ: {
+		r = -EOPNOTSUPP;
+		break;
+	}
+#ifdef KVM_CAP_ASSIGN_DEV_IRQ
+	case KVM_ASSIGN_DEV_IRQ: {
+		struct kvm_assigned_irq assigned_irq;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
+			goto out;
+		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
+		if (r)
+			goto out;
+		break;
+	}
+	case KVM_DEASSIGN_DEV_IRQ: {
+		struct kvm_assigned_irq assigned_irq;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
+			goto out;
+		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
+		if (r)
+			goto out;
+		break;
+	}
+#endif
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+	case KVM_DEASSIGN_PCI_DEVICE: {
+		struct kvm_assigned_pci_dev assigned_dev;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+			goto out;
+		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
+		if (r)
+			goto out;
+		break;
+	}
+#endif
+#ifdef KVM_CAP_IRQ_ROUTING
+	case KVM_SET_GSI_ROUTING: {
+		struct kvm_irq_routing routing;
+		struct kvm_irq_routing __user *urouting;
+		struct kvm_irq_routing_entry *entries;
+
+		r = -EFAULT;
+		if (copy_from_user(&routing, argp, sizeof(routing)))
+			goto out;
+		r = -EINVAL;
+		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+			goto out;
+		if (routing.flags)
+			goto out;
+		r = -ENOMEM;
+		entries = vmalloc(routing.nr * sizeof(*entries));
+		if (!entries)
+			goto out;
+		r = -EFAULT;
+		urouting = argp;
+		if (copy_from_user(entries, urouting->entries,
+				   routing.nr * sizeof(*entries)))
+			goto out_free_irq_routing;
+		r = kvm_set_irq_routing(kvm, entries, routing.nr,
+					routing.flags);
+	out_free_irq_routing:
+		vfree(entries);
+		break;
+	}
+#endif /* KVM_CAP_IRQ_ROUTING */
+#ifdef __KVM_HAVE_MSIX
+	case KVM_ASSIGN_SET_MSIX_NR: {
+		struct kvm_assigned_msix_nr entry_nr;
+		r = -EFAULT;
+		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
+			goto out;
+		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
+		if (r)
+			goto out;
+		break;
+	}
+	case KVM_ASSIGN_SET_MSIX_ENTRY: {
+		struct kvm_assigned_msix_entry entry;
+		r = -EFAULT;
+		if (copy_from_user(&entry, argp, sizeof entry))
+			goto out;
+		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
+		if (r)
+			goto out;
+		break;
+	}
+#endif
+	}
+out:
+	return r;
+}
+
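The two MSI-X ioctls above imply an ordering for userspace:
KVM_ASSIGN_SET_MSIX_NR must be issued exactly once per device, with
0 < entry_nr < KVM_MAX_MSIX_PER_DEV, before any KVM_ASSIGN_SET_MSIX_ENTRY
call; a second SET_MSIX_NR fails with -EINVAL, and registering more entries
than declared fails with -ENOSPC.  A hypothetical userspace sequence
(vm_fd, dev_id and the gsi[] values are placeholders, error handling
omitted):

	struct kvm_assigned_msix_nr nr = {
		.assigned_dev_id = dev_id,
		.entry_nr = 3,			/* declared exactly once */
	};
	ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_NR, &nr);

	for (i = 0; i < 3; i++) {
		struct kvm_assigned_msix_entry e = {
			.assigned_dev_id = dev_id,
			.entry = i,		/* MSI-X table index */
			.gsi = gsi[i],		/* guest routing entry */
		};
		ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_ENTRY, &e);
	}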
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c12c95b1b641..38e4d2c34ac1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -53,12 +53,6 @@
 #include "coalesced_mmio.h"
 #endif
 
-#ifdef KVM_CAP_DEVICE_ASSIGNMENT
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include "irq.h"
-#endif
-
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
 
@@ -90,608 +84,6 @@ static bool kvm_rebooting;
 
 static bool largepages_enabled = true;
 
-#ifdef KVM_CAP_DEVICE_ASSIGNMENT
-static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
-						      int assigned_dev_id)
-{
-	struct list_head *ptr;
-	struct kvm_assigned_dev_kernel *match;
-
-	list_for_each(ptr, head) {
-		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
-		if (match->assigned_dev_id == assigned_dev_id)
-			return match;
-	}
-	return NULL;
-}
-
-static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
-				    *assigned_dev, int irq)
-{
-	int i, index;
-	struct msix_entry *host_msix_entries;
-
-	host_msix_entries = assigned_dev->host_msix_entries;
-
-	index = -1;
-	for (i = 0; i < assigned_dev->entries_nr; i++)
-		if (irq == host_msix_entries[i].vector) {
-			index = i;
-			break;
-		}
-	if (index < 0) {
-		printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
-		return 0;
-	}
-
-	return index;
-}
-
-static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
-{
-	struct kvm_assigned_dev_kernel *assigned_dev;
-	struct kvm *kvm;
-	int i;
-
-	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
-				    interrupt_work);
-	kvm = assigned_dev->kvm;
-
-	spin_lock_irq(&assigned_dev->assigned_dev_lock);
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		struct kvm_guest_msix_entry *guest_entries =
-			assigned_dev->guest_msix_entries;
-		for (i = 0; i < assigned_dev->entries_nr; i++) {
-			if (!(guest_entries[i].flags &
-					KVM_ASSIGNED_MSIX_PENDING))
-				continue;
-			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
-			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id,
-				    guest_entries[i].vector, 1);
-		}
-	} else
-		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-			    assigned_dev->guest_irq, 1);
-
-	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-}
-
-static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
-{
-	unsigned long flags;
-	struct kvm_assigned_dev_kernel *assigned_dev =
-		(struct kvm_assigned_dev_kernel *) dev_id;
-
-	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		int index = find_index_from_host_irq(assigned_dev, irq);
-		if (index < 0)
-			goto out;
-		assigned_dev->guest_msix_entries[index].flags |=
-			KVM_ASSIGNED_MSIX_PENDING;
-	}
-
-	schedule_work(&assigned_dev->interrupt_work);
-
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
-		disable_irq_nosync(irq);
-		assigned_dev->host_irq_disabled = true;
-	}
-
-out:
-	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
-	return IRQ_HANDLED;
-}
-
-/* Ack the irq line for an assigned device */
-static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
-{
-	struct kvm_assigned_dev_kernel *dev;
-	unsigned long flags;
-
-	if (kian->gsi == -1)
-		return;
-
-	dev = container_of(kian, struct kvm_assigned_dev_kernel,
-			   ack_notifier);
-
-	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
-
-	/* The guest irq may be shared so this ack may be
-	 * from another device.
-	 */
-	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
-	if (dev->host_irq_disabled) {
-		enable_irq(dev->host_irq);
-		dev->host_irq_disabled = false;
-	}
-	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
-}
-
-static void deassign_guest_irq(struct kvm *kvm,
-			       struct kvm_assigned_dev_kernel *assigned_dev)
-{
-	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
-	assigned_dev->ack_notifier.gsi = -1;
-
-	if (assigned_dev->irq_source_id != -1)
-		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
-	assigned_dev->irq_source_id = -1;
-	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
-}
-
-/* The function implicit hold kvm->lock mutex due to cancel_work_sync() */
-static void deassign_host_irq(struct kvm *kvm,
-			      struct kvm_assigned_dev_kernel *assigned_dev)
-{
-	/*
-	 * In kvm_free_device_irq, cancel_work_sync return true if:
-	 * 1. work is scheduled, and then cancelled.
-	 * 2. work callback is executed.
-	 *
-	 * The first one ensured that the irq is disabled and no more events
-	 * would happen. But for the second one, the irq may be enabled (e.g.
-	 * for MSI). So we disable irq here to prevent further events.
-	 *
-	 * Notice this maybe result in nested disable if the interrupt type is
-	 * INTx, but it's OK for we are going to free it.
-	 *
-	 * If this function is a part of VM destroy, please ensure that till
-	 * now, the kvm state is still legal for probably we also have to wait
-	 * interrupt_work done.
-	 */
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		int i;
-		for (i = 0; i < assigned_dev->entries_nr; i++)
-			disable_irq_nosync(assigned_dev->
-					   host_msix_entries[i].vector);
-
-		cancel_work_sync(&assigned_dev->interrupt_work);
-
-		for (i = 0; i < assigned_dev->entries_nr; i++)
-			free_irq(assigned_dev->host_msix_entries[i].vector,
-				 (void *)assigned_dev);
-
-		assigned_dev->entries_nr = 0;
-		kfree(assigned_dev->host_msix_entries);
-		kfree(assigned_dev->guest_msix_entries);
-		pci_disable_msix(assigned_dev->dev);
-	} else {
-		/* Deal with MSI and INTx */
-		disable_irq_nosync(assigned_dev->host_irq);
-		cancel_work_sync(&assigned_dev->interrupt_work);
-
-		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
-
-		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
-			pci_disable_msi(assigned_dev->dev);
-	}
-
-	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
-}
-
-static int kvm_deassign_irq(struct kvm *kvm,
-			    struct kvm_assigned_dev_kernel *assigned_dev,
-			    unsigned long irq_requested_type)
-{
-	unsigned long guest_irq_type, host_irq_type;
-
-	if (!irqchip_in_kernel(kvm))
-		return -EINVAL;
-	/* no irq assignment to deassign */
-	if (!assigned_dev->irq_requested_type)
-		return -ENXIO;
-
-	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
-	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
-
-	if (host_irq_type)
-		deassign_host_irq(kvm, assigned_dev);
-	if (guest_irq_type)
-		deassign_guest_irq(kvm, assigned_dev);
-
-	return 0;
-}
-
-static void kvm_free_assigned_irq(struct kvm *kvm,
-				  struct kvm_assigned_dev_kernel *assigned_dev)
-{
-	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
-}
-
-static void kvm_free_assigned_device(struct kvm *kvm,
-				     struct kvm_assigned_dev_kernel
-				     *assigned_dev)
-{
-	kvm_free_assigned_irq(kvm, assigned_dev);
-
-	pci_reset_function(assigned_dev->dev);
-
-	pci_release_regions(assigned_dev->dev);
-	pci_disable_device(assigned_dev->dev);
-	pci_dev_put(assigned_dev->dev);
-
-	list_del(&assigned_dev->list);
-	kfree(assigned_dev);
-}
-
-void kvm_free_all_assigned_devices(struct kvm *kvm)
-{
-	struct list_head *ptr, *ptr2;
-	struct kvm_assigned_dev_kernel *assigned_dev;
-
-	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
-		assigned_dev = list_entry(ptr,
-					  struct kvm_assigned_dev_kernel,
-					  list);
-
-		kvm_free_assigned_device(kvm, assigned_dev);
-	}
-}
-
-static int assigned_device_enable_host_intx(struct kvm *kvm,
-					    struct kvm_assigned_dev_kernel *dev)
-{
-	dev->host_irq = dev->dev->irq;
-	/* Even though this is PCI, we don't want to use shared
-	 * interrupts. Sharing host devices with guest-assigned devices
-	 * on the same interrupt line is not a happy situation: there
-	 * are going to be long delays in accepting, acking, etc.
-	 */
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
-			0, "kvm_assigned_intx_device", (void *)dev))
-		return -EIO;
-	return 0;
-}
-
-#ifdef __KVM_HAVE_MSI
-static int assigned_device_enable_host_msi(struct kvm *kvm,
-					   struct kvm_assigned_dev_kernel *dev)
-{
-	int r;
-
-	if (!dev->dev->msi_enabled) {
-		r = pci_enable_msi(dev->dev);
-		if (r)
-			return r;
-	}
-
-	dev->host_irq = dev->dev->irq;
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
-			"kvm_assigned_msi_device", (void *)dev)) {
-		pci_disable_msi(dev->dev);
-		return -EIO;
-	}
-
-	return 0;
-}
-#endif
-
-#ifdef __KVM_HAVE_MSIX
-static int assigned_device_enable_host_msix(struct kvm *kvm,
-					    struct kvm_assigned_dev_kernel *dev)
-{
-	int i, r = -EINVAL;
-
-	/* host_msix_entries and guest_msix_entries should have been
-	 * initialized */
-	if (dev->entries_nr == 0)
-		return r;
-
-	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
-	if (r)
-		return r;
-
-	for (i = 0; i < dev->entries_nr; i++) {
-		r = request_irq(dev->host_msix_entries[i].vector,
-				kvm_assigned_dev_intr, 0,
-				"kvm_assigned_msix_device",
-				(void *)dev);
-		/* FIXME: free requested_irq's on failure */
-		if (r)
-			return r;
-	}
-
-	return 0;
-}
-
-#endif
-
-static int assigned_device_enable_guest_intx(struct kvm *kvm,
-				struct kvm_assigned_dev_kernel *dev,
-				struct kvm_assigned_irq *irq)
-{
-	dev->guest_irq = irq->guest_irq;
-	dev->ack_notifier.gsi = irq->guest_irq;
-	return 0;
-}
-
-#ifdef __KVM_HAVE_MSI
-static int assigned_device_enable_guest_msi(struct kvm *kvm,
-			struct kvm_assigned_dev_kernel *dev,
-			struct kvm_assigned_irq *irq)
-{
-	dev->guest_irq = irq->guest_irq;
-	dev->ack_notifier.gsi = -1;
-	dev->host_irq_disabled = false;
-	return 0;
-}
-#endif
-#ifdef __KVM_HAVE_MSIX
-static int assigned_device_enable_guest_msix(struct kvm *kvm,
-			struct kvm_assigned_dev_kernel *dev,
-			struct kvm_assigned_irq *irq)
-{
-	dev->guest_irq = irq->guest_irq;
-	dev->ack_notifier.gsi = -1;
-	dev->host_irq_disabled = false;
-	return 0;
-}
-#endif
-
-static int assign_host_irq(struct kvm *kvm,
-			   struct kvm_assigned_dev_kernel *dev,
-			   __u32 host_irq_type)
-{
-	int r = -EEXIST;
-
-	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
-		return r;
-
-	switch (host_irq_type) {
-	case KVM_DEV_IRQ_HOST_INTX:
-		r = assigned_device_enable_host_intx(kvm, dev);
-		break;
-#ifdef __KVM_HAVE_MSI
-	case KVM_DEV_IRQ_HOST_MSI:
-		r = assigned_device_enable_host_msi(kvm, dev);
-		break;
-#endif
-#ifdef __KVM_HAVE_MSIX
-	case KVM_DEV_IRQ_HOST_MSIX:
-		r = assigned_device_enable_host_msix(kvm, dev);
-		break;
-#endif
-	default:
-		r = -EINVAL;
-	}
-
-	if (!r)
-		dev->irq_requested_type |= host_irq_type;
-
-	return r;
-}
-
-static int assign_guest_irq(struct kvm *kvm,
-			    struct kvm_assigned_dev_kernel *dev,
-			    struct kvm_assigned_irq *irq,
-			    unsigned long guest_irq_type)
-{
-	int id;
-	int r = -EEXIST;
-
-	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
-		return r;
-
-	id = kvm_request_irq_source_id(kvm);
-	if (id < 0)
-		return id;
-
-	dev->irq_source_id = id;
-
-	switch (guest_irq_type) {
-	case KVM_DEV_IRQ_GUEST_INTX:
-		r = assigned_device_enable_guest_intx(kvm, dev, irq);
-		break;
-#ifdef __KVM_HAVE_MSI
-	case KVM_DEV_IRQ_GUEST_MSI:
-		r = assigned_device_enable_guest_msi(kvm, dev, irq);
-		break;
-#endif
-#ifdef __KVM_HAVE_MSIX
-	case KVM_DEV_IRQ_GUEST_MSIX:
-		r = assigned_device_enable_guest_msix(kvm, dev, irq);
-		break;
-#endif
-	default:
-		r = -EINVAL;
-	}
-
-	if (!r) {
-		dev->irq_requested_type |= guest_irq_type;
-		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
-	} else
-		kvm_free_irq_source_id(kvm, dev->irq_source_id);
-
-	return r;
-}
-
-/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
-static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
-				   struct kvm_assigned_irq *assigned_irq)
-{
-	int r = -EINVAL;
-	struct kvm_assigned_dev_kernel *match;
-	unsigned long host_irq_type, guest_irq_type;
-
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
-	if (!irqchip_in_kernel(kvm))
-		return r;
-
-	mutex_lock(&kvm->lock);
-	r = -ENODEV;
-	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      assigned_irq->assigned_dev_id);
-	if (!match)
-		goto out;
-
-	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
-	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
-
-	r = -EINVAL;
-	/* can only assign one type at a time */
-	if (hweight_long(host_irq_type) > 1)
-		goto out;
-	if (hweight_long(guest_irq_type) > 1)
-		goto out;
-	if (host_irq_type == 0 && guest_irq_type == 0)
-		goto out;
-
-	r = 0;
-	if (host_irq_type)
-		r = assign_host_irq(kvm, match, host_irq_type);
-	if (r)
-		goto out;
-
-	if (guest_irq_type)
-		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
-out:
-	mutex_unlock(&kvm->lock);
-	return r;
-}
-
-static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
-					 struct kvm_assigned_irq
-					 *assigned_irq)
-{
-	int r = -ENODEV;
-	struct kvm_assigned_dev_kernel *match;
-
-	mutex_lock(&kvm->lock);
-
-	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      assigned_irq->assigned_dev_id);
-	if (!match)
-		goto out;
-
-	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
-out:
-	mutex_unlock(&kvm->lock);
-	return r;
-}
-
-static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
-				      struct kvm_assigned_pci_dev *assigned_dev)
-{
-	int r = 0;
-	struct kvm_assigned_dev_kernel *match;
-	struct pci_dev *dev;
-
-	down_read(&kvm->slots_lock);
-	mutex_lock(&kvm->lock);
-
-	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      assigned_dev->assigned_dev_id);
-	if (match) {
-		/* device already assigned */
-		r = -EEXIST;
-		goto out;
-	}
-
-	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
-	if (match == NULL) {
-		printk(KERN_INFO "%s: Couldn't allocate memory\n",
-		       __func__);
-		r = -ENOMEM;
-		goto out;
-	}
-	dev = pci_get_bus_and_slot(assigned_dev->busnr,
-				   assigned_dev->devfn);
-	if (!dev) {
-		printk(KERN_INFO "%s: host device not found\n", __func__);
-		r = -EINVAL;
-		goto out_free;
-	}
-	if (pci_enable_device(dev)) {
-		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
-		r = -EBUSY;
-		goto out_put;
-	}
-	r = pci_request_regions(dev, "kvm_assigned_device");
-	if (r) {
-		printk(KERN_INFO "%s: Could not get access to device regions\n",
-		       __func__);
-		goto out_disable;
-	}
-
-	pci_reset_function(dev);
-
-	match->assigned_dev_id = assigned_dev->assigned_dev_id;
-	match->host_busnr = assigned_dev->busnr;
-	match->host_devfn = assigned_dev->devfn;
-	match->flags = assigned_dev->flags;
-	match->dev = dev;
-	spin_lock_init(&match->assigned_dev_lock);
-	match->irq_source_id = -1;
-	match->kvm = kvm;
-	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-	INIT_WORK(&match->interrupt_work,
-		  kvm_assigned_dev_interrupt_work_handler);
-
-	list_add(&match->list, &kvm->arch.assigned_dev_head);
-
-	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-		if (!kvm->arch.iommu_domain) {
-			r = kvm_iommu_map_guest(kvm);
-			if (r)
-				goto out_list_del;
-		}
-		r = kvm_assign_device(kvm, match);
-		if (r)
-			goto out_list_del;
-	}
-
-out:
-	mutex_unlock(&kvm->lock);
-	up_read(&kvm->slots_lock);
-	return r;
-out_list_del:
-	list_del(&match->list);
-	pci_release_regions(dev);
-out_disable:
-	pci_disable_device(dev);
-out_put:
-	pci_dev_put(dev);
-out_free:
-	kfree(match);
-	mutex_unlock(&kvm->lock);
-	up_read(&kvm->slots_lock);
-	return r;
-}
-#endif
-
-#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
-static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
-		struct kvm_assigned_pci_dev *assigned_dev)
-{
-	int r = 0;
-	struct kvm_assigned_dev_kernel *match;
-
-	mutex_lock(&kvm->lock);
-
-	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				      assigned_dev->assigned_dev_id);
-	if (!match) {
-		printk(KERN_INFO "%s: device hasn't been assigned before, "
-		       "so cannot be deassigned\n", __func__);
-		r = -EINVAL;
-		goto out;
-	}
-
-	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
-		kvm_deassign_device(kvm, match);
-
-	kvm_free_assigned_device(kvm, match);
-
-out:
-	mutex_unlock(&kvm->lock);
-	return r;
-}
-#endif
-
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -1824,88 +1216,6 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 	return 0;
 }
 
-#ifdef __KVM_HAVE_MSIX
-static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
-				    struct kvm_assigned_msix_nr *entry_nr)
-{
-	int r = 0;
-	struct kvm_assigned_dev_kernel *adev;
-
-	mutex_lock(&kvm->lock);
-
-	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				     entry_nr->assigned_dev_id);
-	if (!adev) {
-		r = -EINVAL;
-		goto msix_nr_out;
-	}
-
-	if (adev->entries_nr == 0) {
-		adev->entries_nr = entry_nr->entry_nr;
-		if (adev->entries_nr == 0 ||
-		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
-			r = -EINVAL;
-			goto msix_nr_out;
-		}
-
-		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
-						  entry_nr->entry_nr,
-						  GFP_KERNEL);
-		if (!adev->host_msix_entries) {
-			r = -ENOMEM;
-			goto msix_nr_out;
-		}
-		adev->guest_msix_entries = kzalloc(
-				sizeof(struct kvm_guest_msix_entry) *
-				entry_nr->entry_nr, GFP_KERNEL);
-		if (!adev->guest_msix_entries) {
-			kfree(adev->host_msix_entries);
-			r = -ENOMEM;
-			goto msix_nr_out;
-		}
-	} else /* Not allowed set MSI-X number twice */
-		r = -EINVAL;
-msix_nr_out:
-	mutex_unlock(&kvm->lock);
-	return r;
-}
-
-static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
-				       struct kvm_assigned_msix_entry *entry)
-{
-	int r = 0, i;
-	struct kvm_assigned_dev_kernel *adev;
-
-	mutex_lock(&kvm->lock);
-
-	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
-				     entry->assigned_dev_id);
-
-	if (!adev) {
-		r = -EINVAL;
-		goto msix_entry_out;
-	}
-
-	for (i = 0; i < adev->entries_nr; i++)
-		if (adev->guest_msix_entries[i].vector == 0 ||
-		    adev->guest_msix_entries[i].entry == entry->entry) {
-			adev->guest_msix_entries[i].entry = entry->entry;
-			adev->guest_msix_entries[i].vector = entry->gsi;
-			adev->host_msix_entries[i].entry = entry->entry;
-			break;
-		}
-	if (i == adev->entries_nr) {
-		r = -ENOSPC;
-		goto msix_entry_out;
-	}
-
-msix_entry_out:
-	mutex_unlock(&kvm->lock);
-
-	return r;
-}
-#endif
-
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -2164,112 +1474,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
-#ifdef KVM_CAP_DEVICE_ASSIGNMENT
-	case KVM_ASSIGN_PCI_DEVICE: {
-		struct kvm_assigned_pci_dev assigned_dev;
-
-		r = -EFAULT;
-		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
-			goto out;
-		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
-		if (r)
-			goto out;
-		break;
-	}
-	case KVM_ASSIGN_IRQ: {
-		r = -EOPNOTSUPP;
-		break;
-	}
-#ifdef KVM_CAP_ASSIGN_DEV_IRQ
-	case KVM_ASSIGN_DEV_IRQ: {
-		struct kvm_assigned_irq assigned_irq;
-
-		r = -EFAULT;
-		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
-			goto out;
-		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
-		if (r)
-			goto out;
-		break;
-	}
-	case KVM_DEASSIGN_DEV_IRQ: {
-		struct kvm_assigned_irq assigned_irq;
-
-		r = -EFAULT;
-		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
-			goto out;
-		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
-		if (r)
-			goto out;
-		break;
-	}
-#endif
-#endif
-#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
-	case KVM_DEASSIGN_PCI_DEVICE: {
-		struct kvm_assigned_pci_dev assigned_dev;
-
-		r = -EFAULT;
-		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
-			goto out;
-		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
-		if (r)
-			goto out;
-		break;
-	}
-#endif
-#ifdef KVM_CAP_IRQ_ROUTING
-	case KVM_SET_GSI_ROUTING: {
-		struct kvm_irq_routing routing;
-		struct kvm_irq_routing __user *urouting;
-		struct kvm_irq_routing_entry *entries;
-
-		r = -EFAULT;
-		if (copy_from_user(&routing, argp, sizeof(routing)))
-			goto out;
-		r = -EINVAL;
-		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
-			goto out;
-		if (routing.flags)
-			goto out;
-		r = -ENOMEM;
-		entries = vmalloc(routing.nr * sizeof(*entries));
-		if (!entries)
-			goto out;
-		r = -EFAULT;
-		urouting = argp;
-		if (copy_from_user(entries, urouting->entries,
-				   routing.nr * sizeof(*entries)))
-			goto out_free_irq_routing;
-		r = kvm_set_irq_routing(kvm, entries, routing.nr,
-					routing.flags);
-	out_free_irq_routing:
-		vfree(entries);
-		break;
-	}
-#endif /* KVM_CAP_IRQ_ROUTING */
-#ifdef __KVM_HAVE_MSIX
-	case KVM_ASSIGN_SET_MSIX_NR: {
-		struct kvm_assigned_msix_nr entry_nr;
-		r = -EFAULT;
-		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
-			goto out;
-		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
-		if (r)
-			goto out;
-		break;
-	}
-	case KVM_ASSIGN_SET_MSIX_ENTRY: {
-		struct kvm_assigned_msix_entry entry;
-		r = -EFAULT;
-		if (copy_from_user(&entry, argp, sizeof entry))
-			goto out;
-		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
-		if (r)
-			goto out;
-		break;
-	}
-#endif
 	case KVM_IRQFD: {
 		struct kvm_irqfd data;
 
@@ -2301,6 +1505,8 @@ static long kvm_vm_ioctl(struct file *filp,
 #endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
+		if (r == -ENOTTY)
+			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
 	}
 out:
 	return r;
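For reference, the ioctls routed through kvm_vm_ioctl_assigned_device() are
issued by userspace (qemu-kvm in practice) on the VM file descriptor, and
KVM_ASSIGN_DEV_IRQ additionally requires CAP_SYS_RAWIO.  A hypothetical
sketch of the basic assignment path; the device id, bus/devfn and guest irq
below are made-up values:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Assign host PCI device 00:19.0 with IOMMU protection, then
	 * route its host INTx line to a guest INTx.  vm_fd is a KVM VM
	 * file descriptor obtained elsewhere. */
	static int assign_example(int vm_fd)
	{
		struct kvm_assigned_pci_dev dev = {
			.assigned_dev_id = 1,		/* caller-chosen handle */
			.busnr = 0x00,
			.devfn = (0x19 << 3) | 0x0,	/* PCI_DEVFN(0x19, 0) */
			.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,
		};
		struct kvm_assigned_irq irq = {
			.assigned_dev_id = 1,
			.guest_irq = 10,		/* guest GSI, placeholder */
			.flags = KVM_DEV_IRQ_HOST_INTX | KVM_DEV_IRQ_GUEST_INTX,
		};

		if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)
			return -1;
		return ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);
	}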