author	Jason Wang <jasowang@redhat.com>	2012-08-28 07:54:14 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2012-09-28 01:35:15 -0400
commit	75a0a52be3c27b58654fbed2c8f2ff401482b9a4 (patch)
tree	01c2815aa913b714f4d11869d56941717737646e /drivers/virtio
parent	17bb6d40880d4178f5f8a75900ed8c9ff47d3fb2 (diff)
virtio: introduce an API to set affinity for a virtqueue
Sometimes a virtio device needs to configure an irq affinity hint to maximize performance. Instead of just exposing the irq of a virtqueue, this patch introduces an API to set the affinity for a virtqueue.

The API is best-effort: the affinity hint may not be set as expected due to platform support, irq sharing or irq type. Currently, only the PCI method is implemented, and we set the affinity as follows:

- if the device uses INTX, we just ignore the request
- if the device has a per-vq vector, we force the affinity hint
- if the virtqueues share an MSI vector, we make the affinity the OR over all requested affinities

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
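For context, a hedged driver-side sketch of how a multiqueue device might use the new op to spread its virtqueue vectors across online CPUs. The helper name and the vqs/nvqs parameters are hypothetical; real drivers would normally go through the virtqueue_set_affinity() wrapper introduced alongside this op (outside the drivers/virtio diffstat shown here), and the call is best-effort as described above.

#include <linux/cpumask.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical sketch, not part of this patch: assign one online CPU
 * per virtqueue via the new set_vq_affinity config op. */
static void example_spread_vq_affinity(struct virtqueue **vqs, int nvqs)
{
	int cpu = cpumask_first(cpu_online_mask);
	int i;

	for (i = 0; i < nvqs; i++) {
		/* Best effort: ignored for INTX, OR'ed into the shared mask
		 * when several virtqueues share one MSI vector. */
		if (vqs[i]->vdev->config->set_vq_affinity)
			vqs[i]->vdev->config->set_vq_affinity(vqs[i], cpu);

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
}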
Diffstat (limited to 'drivers/virtio')
-rw-r--r--	drivers/virtio/virtio_pci.c	| 46
1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index d902464b89c..f5dfe6bdb95 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -48,6 +48,7 @@ struct virtio_pci_device
 	int msix_enabled;
 	int intx_enabled;
 	struct msix_entry *msix_entries;
+	cpumask_var_t *msix_affinity_masks;
 	/* Name strings for interrupts. This size should be enough,
 	 * and I'm too lazy to allocate each name separately. */
 	char (*msix_names)[256];
@@ -276,6 +277,10 @@ static void vp_free_vectors(struct virtio_device *vdev)
 	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
 		free_irq(vp_dev->msix_entries[i].vector, vp_dev);
 
+	for (i = 0; i < vp_dev->msix_vectors; i++)
+		if (vp_dev->msix_affinity_masks[i])
+			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+
 	if (vp_dev->msix_enabled) {
 		/* Disable the vector used for configuration */
 		iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -293,6 +298,8 @@ static void vp_free_vectors(struct virtio_device *vdev)
 	vp_dev->msix_names = NULL;
 	kfree(vp_dev->msix_entries);
 	vp_dev->msix_entries = NULL;
+	kfree(vp_dev->msix_affinity_masks);
+	vp_dev->msix_affinity_masks = NULL;
 }
 
 static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
@@ -311,6 +318,15 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 			       GFP_KERNEL);
 	if (!vp_dev->msix_names)
 		goto error;
+	vp_dev->msix_affinity_masks
+		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+			  GFP_KERNEL);
+	if (!vp_dev->msix_affinity_masks)
+		goto error;
+	for (i = 0; i < nvectors; ++i)
+		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+					GFP_KERNEL))
+			goto error;
 
 	for (i = 0; i < nvectors; ++i)
 		vp_dev->msix_entries[i].entry = i;
@@ -606,6 +622,35 @@ static const char *vp_bus_name(struct virtio_device *vdev)
 	return pci_name(vp_dev->pci_dev);
 }
 
+/* Setup the affinity for a virtqueue:
+ * - force the affinity for per vq vector
+ * - OR over all affinities for shared MSI
+ * - ignore the affinity request if we're using INTX
+ */
+static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+{
+	struct virtio_device *vdev = vq->vdev;
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_pci_vq_info *info = vq->priv;
+	struct cpumask *mask;
+	unsigned int irq;
+
+	if (!vq->callback)
+		return -EINVAL;
+
+	if (vp_dev->msix_enabled) {
+		mask = vp_dev->msix_affinity_masks[info->msix_vector];
+		irq = vp_dev->msix_entries[info->msix_vector].vector;
+		if (cpu == -1)
+			irq_set_affinity_hint(irq, NULL);
+		else {
+			cpumask_set_cpu(cpu, mask);
+			irq_set_affinity_hint(irq, mask);
+		}
+	}
+	return 0;
+}
+
 static struct virtio_config_ops virtio_pci_config_ops = {
 	.get = vp_get,
 	.set = vp_set,
@@ -617,6 +662,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
 	.get_features = vp_get_features,
 	.finalize_features = vp_finalize_features,
 	.bus_name = vp_bus_name,
+	.set_vq_affinity = vp_set_vq_affinity,
 };
 
 static void virtio_pci_release_dev(struct device *_d)
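Note on the shared-vector path above: vp_set_vq_affinity() only ever adds CPUs to the per-vector mask with cpumask_set_cpu(), so repeated calls for virtqueues sharing one MSI vector accumulate into the OR of all requested affinities, while passing cpu == -1 drops the hint for that vector. A minimal, hypothetical teardown sketch (not part of this patch, reusing the headers from the earlier sketch) that clears the hints before the vectors are freed:

/* Hypothetical sketch: clear the affinity hints by passing cpu == -1,
 * which maps to irq_set_affinity_hint(irq, NULL) in vp_set_vq_affinity(). */
static void example_clear_vq_affinity(struct virtqueue **vqs, int nvqs)
{
	int i;

	for (i = 0; i < nvqs; i++)
		if (vqs[i]->vdev->config->set_vq_affinity)
			vqs[i]->vdev->config->set_vq_affinity(vqs[i], -1);
}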