Diffstat (limited to 'drivers/virtio/virtio_pci.c')
-rw-r--r--  drivers/virtio/virtio_pci.c  125
1 files changed, 70 insertions, 55 deletions

diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 248e00ec4dc1..4a1f1ebff7bf 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -84,7 +84,7 @@ struct virtio_pci_vq_info
 	struct list_head node;
 
 	/* MSI-X vector (or none) */
-	unsigned vector;
+	unsigned msix_vector;
 };
 
 /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -280,25 +280,14 @@ static void vp_free_vectors(struct virtio_device *vdev)
 	vp_dev->msix_entries = NULL;
 }
 
-static int vp_request_vectors(struct virtio_device *vdev, int nvectors,
-			      bool per_vq_vectors)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+				   bool per_vq_vectors)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	const char *name = dev_name(&vp_dev->vdev.dev);
 	unsigned i, v;
 	int err = -ENOMEM;
 
-	if (!nvectors) {
-		/* Can't allocate MSI-X vectors, use regular interrupt */
-		vp_dev->msix_vectors = 0;
-		err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
-				  IRQF_SHARED, name, vp_dev);
-		if (err)
-			return err;
-		vp_dev->intx_enabled = 1;
-		return 0;
-	}
-
 	vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
 				       GFP_KERNEL);
 	if (!vp_dev->msix_entries)
@@ -311,6 +300,7 @@ static int vp_request_vectors(struct virtio_device *vdev, int nvectors,
 	for (i = 0; i < nvectors; ++i)
 		vp_dev->msix_entries[i].entry = i;
 
+	/* pci_enable_msix returns positive if we can't get this many. */
 	err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors);
 	if (err > 0)
 		err = -ENOSPC;
@@ -356,10 +346,22 @@ error:
 	return err;
 }
 
-static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
-				    void (*callback)(struct virtqueue *vq),
-				    const char *name,
-				    u16 vector)
+static int vp_request_intx(struct virtio_device *vdev)
+{
+	int err;
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
+			  IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
+	if (!err)
+		vp_dev->intx_enabled = 1;
+	return err;
+}
+
+static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
+				  void (*callback)(struct virtqueue *vq),
+				  const char *name,
+				  u16 msix_vec)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtio_pci_vq_info *info;
@@ -384,7 +386,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 
 	info->queue_index = index;
 	info->num = num;
-	info->vector = vector;
+	info->msix_vector = msix_vec;
 
 	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
 	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -408,10 +410,10 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 	vq->priv = info;
 	info->vq = vq;
 
-	if (vector != VIRTIO_MSI_NO_VECTOR) {
-		iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
-		vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
-		if (vector == VIRTIO_MSI_NO_VECTOR) {
+	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
 			err = -EBUSY;
 			goto out_assign;
 		}
@@ -472,7 +474,8 @@ static void vp_del_vqs(struct virtio_device *vdev)
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
 		info = vq->priv;
 		if (vp_dev->per_vq_vectors)
-			free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+			free_irq(vp_dev->msix_entries[info->msix_vector].vector,
+				 vq);
 		vp_del_vq(vq);
 	}
 	vp_dev->per_vq_vectors = false;
@@ -484,38 +487,58 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 			      struct virtqueue *vqs[],
 			      vq_callback_t *callbacks[],
 			      const char *names[],
-			      int nvectors,
+			      bool use_msix,
 			      bool per_vq_vectors)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	u16 vector;
-	int i, err, allocated_vectors;
+	u16 msix_vec;
+	int i, err, nvectors, allocated_vectors;
 
-	err = vp_request_vectors(vdev, nvectors, per_vq_vectors);
-	if (err)
-		goto error_request;
+	if (!use_msix) {
+		/* Old style: one normal interrupt for change and all vqs. */
+		err = vp_request_intx(vdev);
+		if (err)
+			goto error_request;
+	} else {
+		if (per_vq_vectors) {
+			/* Best option: one for change interrupt, one per vq. */
+			nvectors = 1;
+			for (i = 0; i < nvqs; ++i)
+				if (callbacks[i])
+					++nvectors;
+		} else {
+			/* Second best: one for change, shared for all vqs. */
+			nvectors = 2;
+		}
+
+		err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
+		if (err)
+			goto error_request;
+	}
 
 	vp_dev->per_vq_vectors = per_vq_vectors;
 	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
 		if (!callbacks[i] || !vp_dev->msix_enabled)
-			vector = VIRTIO_MSI_NO_VECTOR;
+			msix_vec = VIRTIO_MSI_NO_VECTOR;
 		else if (vp_dev->per_vq_vectors)
-			vector = allocated_vectors++;
+			msix_vec = allocated_vectors++;
 		else
-			vector = VP_MSIX_VQ_VECTOR;
-		vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i], vector);
+			msix_vec = VP_MSIX_VQ_VECTOR;
+		vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
 			goto error_find;
 		}
 		/* allocate per-vq irq if available and necessary */
-		if (vp_dev->per_vq_vectors && vector != VIRTIO_MSI_NO_VECTOR) {
-			snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
-				 "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]);
-			err = request_irq(vp_dev->msix_entries[vector].vector,
-					  vring_interrupt, 0,
-					  vp_dev->msix_names[vector], vqs[i]);
+		if (vp_dev->per_vq_vectors) {
+			snprintf(vp_dev->msix_names[msix_vec],
+				 sizeof *vp_dev->msix_names,
+				 "%s-%s",
+				 dev_name(&vp_dev->vdev.dev), names[i]);
+			err = request_irq(msix_vec, vring_interrupt, 0,
+					  vp_dev->msix_names[msix_vec],
+					  vqs[i]);
 			if (err) {
 				vp_del_vq(vqs[i]);
 				goto error_find;
@@ -537,28 +560,20 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		       vq_callback_t *callbacks[],
 		       const char *names[])
 {
-	int vectors = 0;
-	int i, uninitialized_var(err);
-
-	/* How many vectors would we like? */
-	for (i = 0; i < nvqs; ++i)
-		if (callbacks[i])
-			++vectors;
+	int err;
 
-	/* We want at most one vector per queue and one for config changes. */
-	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
-				 vectors + 1, true);
+	/* Try MSI-X with one vector per queue. */
+	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
 	if (!err)
 		return 0;
-	/* Fallback to separate vectors for config and a shared for queues. */
+	/* Fallback: MSI-X with one vector for config, one shared for queues. */
 	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
-				 2, false);
+				 true, false);
 	if (!err)
 		return 0;
 	/* Finally fall back to regular interrupts. */
-	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
-				 0, false);
-	return err;
+	return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
+				  false, false);
 }
 
 static struct virtio_config_ops virtio_pci_config_ops = {
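
Not part of the patch: a minimal driver-side sketch (the example_* names and the two-queue layout are hypothetical) showing that callers of find_vqs() are unaffected by this rework; the per-vq MSI-X, shared MSI-X, and INTx fallback shown above stays internal to virtio_pci.

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical queue callbacks, for illustration only. */
static void example_rx_done(struct virtqueue *vq) { }
static void example_tx_done(struct virtqueue *vq) { }

static int example_probe_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { example_rx_done, example_tx_done };
	const char *names[] = { "example-rx", "example-tx" };

	/*
	 * On virtio-pci this ends up in vp_find_vqs(), which after this
	 * patch tries per-vq MSI-X, then one shared MSI-X vector for all
	 * queues, and finally a regular shared interrupt.
	 */
	return vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
}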
