Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/virtio/virtio_pci.c | 228
 1 file changed, 209 insertions(+), 19 deletions(-)
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 951e673e50a4..193c8f0e5cc5 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -42,6 +42,26 @@ struct virtio_pci_device
         /* a list of queues so we can dispatch IRQs */
         spinlock_t lock;
         struct list_head virtqueues;
+
+        /* MSI-X support */
+        int msix_enabled;
+        int intx_enabled;
+        struct msix_entry *msix_entries;
+        /* Name strings for interrupts. This size should be enough,
+         * and I'm too lazy to allocate each name separately. */
+        char (*msix_names)[256];
+        /* Number of available vectors */
+        unsigned msix_vectors;
+        /* Vectors allocated */
+        unsigned msix_used_vectors;
+};
+
+/* Constants for MSI-X */
+/* Use the first vector for configuration changes, and the second and up
+ * for virtqueues. Thus, we need at least 2 vectors for MSI. */
+enum {
+        VP_MSIX_CONFIG_VECTOR = 0,
+        VP_MSIX_VQ_VECTOR = 1,
 };
 
 struct virtio_pci_vq_info
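The new fields record whether MSI-X or legacy INTx is in use and how many vectors the device granted versus how many the driver has handed out so far. The enum above encodes the intended layout: vector 0 signals configuration changes and virtqueues use vector 1 upward. A rough illustration of that mapping, not part of the patch (the helper name is hypothetical, and queues registered without a callback consume no vector):

/* Illustrative sketch only: which vector a virtqueue ends up on */
static unsigned example_vq_vector(unsigned vq_index, bool per_vq_vectors)
{
        if (per_vq_vectors)
                return VP_MSIX_VQ_VECTOR + vq_index;    /* 1, 2, 3, ... */
        return VP_MSIX_VQ_VECTOR;                       /* all VQs share vector 1 */
}
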
@@ -60,6 +80,9 @@ struct virtio_pci_vq_info
 
         /* the list node for the virtqueues list */
         struct list_head node;
+
+        /* MSI-X vector (or none) */
+        unsigned vector;
 };
 
 /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
                    void *buf, unsigned len)
 {
         struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-        void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+        void __iomem *ioaddr = vp_dev->ioaddr +
+                                VIRTIO_PCI_CONFIG(vp_dev) + offset;
         u8 *ptr = buf;
         int i;
 
@@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
                    const void *buf, unsigned len)
 {
         struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-        void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+        void __iomem *ioaddr = vp_dev->ioaddr +
+                                VIRTIO_PCI_CONFIG(vp_dev) + offset;
         const u8 *ptr = buf;
         int i;
 
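vp_get() and vp_set() switch from a fixed VIRTIO_PCI_CONFIG constant to a VIRTIO_PCI_CONFIG(vp_dev) macro because enabling MSI-X inserts two 16-bit vector registers ahead of the device-specific config area, pushing it to a larger offset. The macro itself lives in the virtio_pci header rather than in this file; a sketch of what the header side amounts to, assuming the legacy virtio PCI register layout:

/* Header-side sketch (not shown in this diff) */
#define VIRTIO_MSI_CONFIG_VECTOR        20      /* vector for config changes */
#define VIRTIO_MSI_QUEUE_VECTOR         22      /* vector for the selected queue */
#define VIRTIO_PCI_CONFIG(dev)          ((dev)->msix_enabled ? 24 : 20)
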
@@ -221,7 +246,122 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
         return vp_vring_interrupt(irq, opaque);
 }
 
-/* the config->find_vq() implementation */
+static void vp_free_vectors(struct virtio_device *vdev)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+        int i;
+
+        if (vp_dev->intx_enabled) {
+                free_irq(vp_dev->pci_dev->irq, vp_dev);
+                vp_dev->intx_enabled = 0;
+        }
+
+        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+                free_irq(vp_dev->msix_entries[i].vector, vp_dev);
+        vp_dev->msix_used_vectors = 0;
+
+        if (vp_dev->msix_enabled) {
+                /* Disable the vector used for configuration */
+                iowrite16(VIRTIO_MSI_NO_VECTOR,
+                          vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+                /* Flush the write out to device */
+                ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+
+                vp_dev->msix_enabled = 0;
+                pci_disable_msix(vp_dev->pci_dev);
+        }
+}
+
+static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
+                          int *options, int noptions)
+{
+        int i;
+        for (i = 0; i < noptions; ++i)
+                if (!pci_enable_msix(dev, entries, options[i]))
+                        return options[i];
+        return -EBUSY;
+}
+
+static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+        const char *name = dev_name(&vp_dev->vdev.dev);
+        unsigned i, v;
+        int err = -ENOMEM;
+        /* We want at most one vector per queue and one for config changes.
+         * Fall back to separate vectors for config and a shared one for queues.
+         * Finally fall back to regular interrupts. */
+        int options[] = { max_vqs + 1, 2 };
+        int nvectors = max(options[0], options[1]);
+
+        vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
+                                       GFP_KERNEL);
+        if (!vp_dev->msix_entries)
+                goto error_entries;
+        vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+                                     GFP_KERNEL);
+        if (!vp_dev->msix_names)
+                goto error_names;
+
+        for (i = 0; i < nvectors; ++i)
+                vp_dev->msix_entries[i].entry = i;
+
+        err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
+                             options, ARRAY_SIZE(options));
+        if (err < 0) {
+                /* Can't allocate enough MSI-X vectors, use regular interrupt */
+                vp_dev->msix_vectors = 0;
+                err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
+                                  IRQF_SHARED, name, vp_dev);
+                if (err)
+                        goto error_irq;
+                vp_dev->intx_enabled = 1;
+        } else {
+                vp_dev->msix_vectors = err;
+                vp_dev->msix_enabled = 1;
+
+                /* Set the vector used for configuration */
+                v = vp_dev->msix_used_vectors;
+                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+                         "%s-config", name);
+                err = request_irq(vp_dev->msix_entries[v].vector,
+                                  vp_config_changed, 0, vp_dev->msix_names[v],
+                                  vp_dev);
+                if (err)
+                        goto error_irq;
+                ++vp_dev->msix_used_vectors;
+
+                iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+                /* Verify we had enough resources to assign the vector */
+                v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+                if (v == VIRTIO_MSI_NO_VECTOR) {
+                        err = -EBUSY;
+                        goto error_irq;
+                }
+        }
+
+        if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
+                /* Shared vector for all VQs */
+                v = vp_dev->msix_used_vectors;
+                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+                         "%s-virtqueues", name);
+                err = request_irq(vp_dev->msix_entries[v].vector,
+                                  vp_vring_interrupt, 0, vp_dev->msix_names[v],
+                                  vp_dev);
+                if (err)
+                        goto error_irq;
+                ++vp_dev->msix_used_vectors;
+        }
+        return 0;
+error_irq:
+        vp_free_vectors(vdev);
+        kfree(vp_dev->msix_names);
+error_names:
+        kfree(vp_dev->msix_entries);
+error_entries:
+        return err;
+}
+
 static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name)
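vp_enable_msix() walks a list of candidate vector counts and returns the first one pci_enable_msix() grants in full, so vp_request_vectors() degrades gracefully: one vector per virtqueue plus one for config changes, then two vectors (config plus a single shared virtqueue vector), and finally the legacy shared INTx line. A condensed, illustrative view of that ladder using the helpers above (hypothetical function name; entry setup and error unwinding omitted):

/* Illustrative only -- the real logic, with cleanup, is in vp_request_vectors() */
static int example_alloc_irqs(struct virtio_pci_device *vp_dev, unsigned max_vqs)
{
        int counts[] = { max_vqs + 1, 2 };      /* per-VQ + config, then shared + config */
        int got = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
                                 counts, ARRAY_SIZE(counts));

        if (got < 0)    /* no MSI-X: fall back to the shared INTx handler */
                return request_irq(vp_dev->pci_dev->irq, vp_interrupt,
                                   IRQF_SHARED, dev_name(&vp_dev->vdev.dev),
                                   vp_dev);

        vp_dev->msix_vectors = got;
        vp_dev->msix_enabled = 1;
        return 0;       /* vector 0 is then wired to vp_config_changed */
}
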
@@ -230,7 +370,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
         struct virtio_pci_vq_info *info;
         struct virtqueue *vq;
         unsigned long flags, size;
-        u16 num;
+        u16 num, vector;
         int err;
 
         /* Select the queue we're interested in */
@@ -249,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 
         info->queue_index = index;
         info->num = num;
+        info->vector = VIRTIO_MSI_NO_VECTOR;
 
         size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
         info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -272,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
         vq->priv = info;
         info->vq = vq;
 
+        /* allocate per-vq vector if available and necessary */
+        if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
+                vector = vp_dev->msix_used_vectors;
+                snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
+                         "%s-%s", dev_name(&vp_dev->vdev.dev), name);
+                err = request_irq(vp_dev->msix_entries[vector].vector,
+                                  vring_interrupt, 0,
+                                  vp_dev->msix_names[vector], vq);
+                if (err)
+                        goto out_request_irq;
+                info->vector = vector;
+                ++vp_dev->msix_used_vectors;
+        } else
+                vector = VP_MSIX_VQ_VECTOR;
+
+        if (callback && vp_dev->msix_enabled) {
+                iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+                vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+                if (vector == VIRTIO_MSI_NO_VECTOR) {
+                        err = -EBUSY;
+                        goto out_assign;
+                }
+        }
+
         spin_lock_irqsave(&vp_dev->lock, flags);
         list_add(&info->node, &vp_dev->virtqueues);
         spin_unlock_irqrestore(&vp_dev->lock, flags);
 
         return vq;
 
+out_assign:
+        if (info->vector != VIRTIO_MSI_NO_VECTOR) {
+                free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+                --vp_dev->msix_used_vectors;
+        }
+out_request_irq:
+        vring_del_virtqueue(vq);
 out_activate_queue:
         iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
         free_pages_exact(info->queue, size);
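Binding a queue to a vector is negotiated with the device: the driver writes the vector index to VIRTIO_MSI_QUEUE_VECTOR and reads it back, and a readback of VIRTIO_MSI_NO_VECTOR means the device could not allocate the resources, which is why both the config-vector path and the per-queue path above check the register after writing it. A minimal standalone sketch of that assign-and-verify idiom (hypothetical helper, not part of the patch):

/* Sketch only: select a queue, propose a vector, and confirm the device took it */
static int example_set_queue_vector(struct virtio_pci_device *vp_dev,
                                    u16 queue_index, u16 vector)
{
        iowrite16(queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
        iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
        /* The readback flushes the write and reports what the device accepted */
        if (ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR) ==
            VIRTIO_MSI_NO_VECTOR)
                return -EBUSY;
        return 0;
}
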
@@ -286,17 +458,27 @@ out_info:
         return ERR_PTR(err);
 }
 
-/* the config->del_vq() implementation */
 static void vp_del_vq(struct virtqueue *vq)
 {
         struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
         struct virtio_pci_vq_info *info = vq->priv;
         unsigned long size;
 
+        iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+
+        if (info->vector != VIRTIO_MSI_NO_VECTOR)
+                free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+
+        if (vp_dev->msix_enabled) {
+                iowrite16(VIRTIO_MSI_NO_VECTOR,
+                          vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+                /* Flush the write out to device */
+                ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
+        }
+
         vring_del_virtqueue(vq);
 
         /* Select and deactivate the queue */
-        iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
         iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
         size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
@@ -304,30 +486,46 @@ static void vp_del_vq(struct virtqueue *vq)
         kfree(info);
 }
 
+/* the config->del_vqs() implementation */
 static void vp_del_vqs(struct virtio_device *vdev)
 {
         struct virtqueue *vq, *n;
 
         list_for_each_entry_safe(vq, n, &vdev->vqs, list)
                 vp_del_vq(vq);
+
+        vp_free_vectors(vdev);
 }
 
+/* the config->find_vqs() implementation */
 static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                        struct virtqueue *vqs[],
                        vq_callback_t *callbacks[],
                        const char *names[])
 {
-        int i;
+        int vectors = 0;
+        int i, err;
+
+        /* How many vectors would we like? */
+        for (i = 0; i < nvqs; ++i)
+                if (callbacks[i])
+                        ++vectors;
+
+        err = vp_request_vectors(vdev, vectors);
+        if (err)
+                goto error_request;
 
         for (i = 0; i < nvqs; ++i) {
                 vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]);
                 if (IS_ERR(vqs[i]))
-                        goto error;
+                        goto error_find;
         }
         return 0;
 
-error:
+error_find:
         vp_del_vqs(vdev);
+
+error_request:
         return PTR_ERR(vqs[i]);
 }
 
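Because vp_find_vqs() asks for one vector per queue that actually has a callback, drivers that leave callbacks[i] NULL for polled queues keep the vector budget down. A sketch of how a driver consumes this config op (hypothetical driver code with an assumed callback name; the real converted callers live elsewhere in this series):

static void example_recv_done(struct virtqueue *vq)
{
        /* hypothetical completion callback */
}

static int example_driver_find_vqs(struct virtio_device *vdev,
                                   struct virtqueue **in_vq,
                                   struct virtqueue **out_vq)
{
        struct virtqueue *vqs[2];
        vq_callback_t *callbacks[] = { example_recv_done, NULL };      /* 2nd VQ is polled */
        const char *names[] = { "input", "output" };
        int err;

        /* Only the "input" queue gets a per-VQ MSI-X vector here */
        err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
        if (err)
                return err;
        *in_vq = vqs[0];
        *out_vq = vqs[1];
        return 0;
}
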
@@ -349,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d)
         struct virtio_pci_device *vp_dev = to_vp_device(dev);
         struct pci_dev *pci_dev = vp_dev->pci_dev;
 
-        free_irq(pci_dev->irq, vp_dev);
+        vp_del_vqs(dev);
         pci_set_drvdata(pci_dev, NULL);
         pci_iounmap(pci_dev, vp_dev->ioaddr);
         pci_release_regions(pci_dev);
@@ -408,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
         vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
         vp_dev->vdev.id.device = pci_dev->subsystem_device;
 
-        /* register a handler for the queue with the PCI device's interrupt */
-        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
-                          dev_name(&vp_dev->vdev.dev), vp_dev);
-        if (err)
-                goto out_set_drvdata;
-
         /* finally register the virtio device */
         err = register_virtio_device(&vp_dev->vdev);
         if (err)
-                goto out_req_irq;
+                goto out_set_drvdata;
 
         return 0;
 
-out_req_irq:
-        free_irq(pci_dev->irq, vp_dev);
 out_set_drvdata:
         pci_set_drvdata(pci_dev, NULL);
         pci_iounmap(pci_dev, vp_dev->ioaddr);