 drivers/vfio/pci/vfio_pci_intrs.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index fa033c32e3d8..de069796b2fd 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -44,6 +44,7 @@ struct virqfd {
 };
 
 static struct workqueue_struct *vfio_irqfd_cleanup_wq;
+DEFINE_SPINLOCK(virqfd_lock);
 
 int __init vfio_virqfd_init(void)
 {
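The hunk above adds virqfd_lock, a single file-scope spinlock created with DEFINE_SPINLOCK(), so the virqfd bookkeeping no longer has to reach through a struct vfio_pci_device to find vdev->irqlock. A minimal, self-contained sketch of that idiom follows; the example_* names are illustrative only, not symbols from the patch:

#include <linux/spinlock.h>

struct example_obj;				/* illustrative, stands in for struct virqfd */

static DEFINE_SPINLOCK(example_lock);		/* statically initialized, no init call needed */
static struct example_obj *example_slot;	/* shared pointer the lock protects */

static void example_clear(void)
{
	unsigned long flags;

	/* IRQ-safe acquire, matching the spin_lock_irqsave() usage in the patch */
	spin_lock_irqsave(&example_lock, flags);
	example_slot = NULL;
	spin_unlock_irqrestore(&example_lock, flags);
}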
@@ -80,21 +81,21 @@ static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 
 	if (flags & POLLHUP) {
 		unsigned long flags;
-		spin_lock_irqsave(&virqfd->vdev->irqlock, flags);
+		spin_lock_irqsave(&virqfd_lock, flags);
 
 		/*
 		 * The eventfd is closing, if the virqfd has not yet been
 		 * queued for release, as determined by testing whether the
-		 * vdev pointer to it is still valid, queue it now. As
+		 * virqfd pointer to it is still valid, queue it now. As
 		 * with kvm irqfds, we know we won't race against the virqfd
-		 * going away because we hold wqh->lock to get here.
+		 * going away because we hold the lock to get here.
 		 */
 		if (*(virqfd->pvirqfd) == virqfd) {
 			*(virqfd->pvirqfd) = NULL;
 			virqfd_deactivate(virqfd);
 		}
 
-		spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
+		spin_unlock_irqrestore(&virqfd_lock, flags);
 	}
 
 	return 0;
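For context on the hunk above: virqfd_wakeup() is a custom wait-queue callback invoked from the eventfd's wakeup path, which is why the POLLHUP teardown only needs to take the new global virqfd_lock before clearing the published pointer. A rough sketch of how such a callback is wired up, using illustrative example_* names rather than the patch's own symbols:

#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kernel.h>

struct example_ctx {
	wait_queue_t wait;			/* embedded wait queue entry */
};

/* Callback invoked when the watched eventfd is signalled or released. */
static int example_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct example_ctx *ctx = container_of(wait, struct example_ctx, wait);
	unsigned long flags = (unsigned long)key;	/* poll event mask */

	if (flags & POLLHUP) {
		/* eventfd is closing: queue ctx for teardown under a lock here */
		pr_debug("example ctx %p: eventfd closed\n", ctx);
	}

	return 0;
}

static void example_init(struct example_ctx *ctx)
{
	/* install example_wakeup as this entry's wake function */
	init_waitqueue_func_entry(&ctx->wait, example_wakeup);
}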
@@ -170,16 +171,16 @@ int vfio_virqfd_enable(struct vfio_pci_device *vdev,
 	 * we update the pointer to the virqfd under lock to avoid
 	 * pushing multiple jobs to release the same virqfd.
 	 */
-	spin_lock_irq(&vdev->irqlock);
+	spin_lock_irq(&virqfd_lock);
 
 	if (*pvirqfd) {
-		spin_unlock_irq(&vdev->irqlock);
+		spin_unlock_irq(&virqfd_lock);
 		ret = -EBUSY;
 		goto err_busy;
 	}
 	*pvirqfd = virqfd;
 
-	spin_unlock_irq(&vdev->irqlock);
+	spin_unlock_irq(&virqfd_lock);
 
 	/*
 	 * Install our own custom wake-up handling so we are notified via
@@ -217,18 +218,18 @@ err_fd:
 }
 EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
 
-void vfio_virqfd_disable(struct vfio_pci_device *vdev, struct virqfd **pvirqfd)
+void vfio_virqfd_disable(struct virqfd **pvirqfd)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&vdev->irqlock, flags);
+	spin_lock_irqsave(&virqfd_lock, flags);
 
 	if (*pvirqfd) {
 		virqfd_deactivate(*pvirqfd);
 		*pvirqfd = NULL;
 	}
 
-	spin_unlock_irqrestore(&vdev->irqlock, flags);
+	spin_unlock_irqrestore(&virqfd_lock, flags);
 
 	/*
 	 * Block until we know all outstanding shutdown jobs have completed.
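The reworked vfio_virqfd_disable() above keeps the ordering the trailing comment describes: clear the published pointer while holding virqfd_lock so release is queued at most once, then block until any outstanding shutdown work has run. A rough, self-contained sketch of that shape, with illustrative example_* names (the patch itself queues the work via virqfd_deactivate()):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_entry {
	struct work_struct shutdown;		/* work item that frees the object */
};

static DEFINE_SPINLOCK(example_entry_lock);
static struct workqueue_struct *example_cleanup_wq;

static void example_entry_disable(struct example_entry **slot)
{
	unsigned long flags;

	spin_lock_irqsave(&example_entry_lock, flags);
	if (*slot) {
		/* hand the object to the cleanup workqueue exactly once */
		queue_work(example_cleanup_wq, &(*slot)->shutdown);
		*slot = NULL;
	}
	spin_unlock_irqrestore(&example_entry_lock, flags);

	/* block until any already-queued shutdown work has completed */
	flush_workqueue(example_cleanup_wq);
}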
@@ -441,8 +442,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
 static void vfio_intx_disable(struct vfio_pci_device *vdev)
 {
 	vfio_intx_set_signal(vdev, -1);
-	vfio_virqfd_disable(vdev, &vdev->ctx[0].unmask);
-	vfio_virqfd_disable(vdev, &vdev->ctx[0].mask);
+	vfio_virqfd_disable(&vdev->ctx[0].unmask);
+	vfio_virqfd_disable(&vdev->ctx[0].mask);
 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
 	vdev->num_ctx = 0;
 	kfree(vdev->ctx);
@@ -606,8 +607,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
 	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
 
 	for (i = 0; i < vdev->num_ctx; i++) {
-		vfio_virqfd_disable(vdev, &vdev->ctx[i].unmask);
-		vfio_virqfd_disable(vdev, &vdev->ctx[i].mask);
+		vfio_virqfd_disable(&vdev->ctx[i].unmask);
+		vfio_virqfd_disable(&vdev->ctx[i].mask);
 	}
 
 	if (msix) {
@@ -645,7 +646,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
 					       vfio_send_intx_eventfd, NULL,
 					       &vdev->ctx[0].unmask, fd);
 
-		vfio_virqfd_disable(vdev, &vdev->ctx[0].unmask);
+		vfio_virqfd_disable(&vdev->ctx[0].unmask);
 	}
 
 	return 0;
