about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAntonios Motakis <a.motakis@virtualopensystems.com>2015-03-16 16:08:54 -0400
committerAlex Williamson <alex.williamson@redhat.com>2015-03-16 16:08:54 -0400
commit7e992d692750b2938224eb43fee907181d92a602 (patch)
treecb09975ca0d547f92203e89df3b832daccdcb449
parent09bbcb8810c4673cb96477e0e83c9bcdfadc7741 (diff)
vfio: move eventfd support code for VFIO_PCI to a separate file
The virqfd functionality that is used by VFIO_PCI to implement interrupt masking and unmasking via an eventfd, is generic enough and can be reused by another driver. Move it to a separate file in order to allow the code to be shared.

Signed-off-by: Antonios Motakis <a.motakis@virtualopensystems.com>
Signed-off-by: Baptiste Reynal <b.reynal@virtualopensystems.com>
Reviewed-by: Eric Auger <eric.auger@linaro.org>
Tested-by: Eric Auger <eric.auger@linaro.org>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
-rw-r--r--drivers/vfio/pci/Makefile3
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c215
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h3
-rw-r--r--drivers/vfio/virqfd.c213
-rw-r--r--include/linux/vfio.h27
5 files changed, 242 insertions, 219 deletions
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 131079255fd9..c7c864436896 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -1,4 +1,5 @@
1 1
2vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o 2vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o \
3 ../virqfd.o
3 4
4obj-$(CONFIG_VFIO_PCI) += vfio-pci.o 5obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index c84a129cc527..1f577b4ac126 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -19,228 +19,13 @@
19#include <linux/msi.h> 19#include <linux/msi.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/file.h> 21#include <linux/file.h>
22#include <linux/poll.h>
23#include <linux/vfio.h> 22#include <linux/vfio.h>
24#include <linux/wait.h> 23#include <linux/wait.h>
25#include <linux/workqueue.h>
26#include <linux/slab.h> 24#include <linux/slab.h>
27 25
28#include "vfio_pci_private.h" 26#include "vfio_pci_private.h"
29 27
30/* 28/*
31 * IRQfd - generic
32 */
33struct virqfd {
34 void *opaque;
35 struct eventfd_ctx *eventfd;
36 int (*handler)(void *, void *);
37 void (*thread)(void *, void *);
38 void *data;
39 struct work_struct inject;
40 wait_queue_t wait;
41 poll_table pt;
42 struct work_struct shutdown;
43 struct virqfd **pvirqfd;
44};
45
46static struct workqueue_struct *vfio_irqfd_cleanup_wq;
47DEFINE_SPINLOCK(virqfd_lock);
48
49int __init vfio_virqfd_init(void)
50{
51 vfio_irqfd_cleanup_wq =
52 create_singlethread_workqueue("vfio-irqfd-cleanup");
53 if (!vfio_irqfd_cleanup_wq)
54 return -ENOMEM;
55
56 return 0;
57}
58
59void vfio_virqfd_exit(void)
60{
61 destroy_workqueue(vfio_irqfd_cleanup_wq);
62}
63
64static void virqfd_deactivate(struct virqfd *virqfd)
65{
66 queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
67}
68
69static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
70{
71 struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
72 unsigned long flags = (unsigned long)key;
73
74 if (flags & POLLIN) {
75 /* An event has been signaled, call function */
76 if ((!virqfd->handler ||
77 virqfd->handler(virqfd->opaque, virqfd->data)) &&
78 virqfd->thread)
79 schedule_work(&virqfd->inject);
80 }
81
82 if (flags & POLLHUP) {
83 unsigned long flags;
84 spin_lock_irqsave(&virqfd_lock, flags);
85
86 /*
87 * The eventfd is closing, if the virqfd has not yet been
88 * queued for release, as determined by testing whether the
89 * virqfd pointer to it is still valid, queue it now. As
90 * with kvm irqfds, we know we won't race against the virqfd
91 * going away because we hold the lock to get here.
92 */
93 if (*(virqfd->pvirqfd) == virqfd) {
94 *(virqfd->pvirqfd) = NULL;
95 virqfd_deactivate(virqfd);
96 }
97
98 spin_unlock_irqrestore(&virqfd_lock, flags);
99 }
100
101 return 0;
102}
103
104static void virqfd_ptable_queue_proc(struct file *file,
105 wait_queue_head_t *wqh, poll_table *pt)
106{
107 struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
108 add_wait_queue(wqh, &virqfd->wait);
109}
110
111static void virqfd_shutdown(struct work_struct *work)
112{
113 struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
114 u64 cnt;
115
116 eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
117 flush_work(&virqfd->inject);
118 eventfd_ctx_put(virqfd->eventfd);
119
120 kfree(virqfd);
121}
122
123static void virqfd_inject(struct work_struct *work)
124{
125 struct virqfd *virqfd = container_of(work, struct virqfd, inject);
126 if (virqfd->thread)
127 virqfd->thread(virqfd->opaque, virqfd->data);
128}
129
130int vfio_virqfd_enable(void *opaque,
131 int (*handler)(void *, void *),
132 void (*thread)(void *, void *),
133 void *data, struct virqfd **pvirqfd, int fd)
134{
135 struct fd irqfd;
136 struct eventfd_ctx *ctx;
137 struct virqfd *virqfd;
138 int ret = 0;
139 unsigned int events;
140
141 virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
142 if (!virqfd)
143 return -ENOMEM;
144
145 virqfd->pvirqfd = pvirqfd;
146 virqfd->opaque = opaque;
147 virqfd->handler = handler;
148 virqfd->thread = thread;
149 virqfd->data = data;
150
151 INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
152 INIT_WORK(&virqfd->inject, virqfd_inject);
153
154 irqfd = fdget(fd);
155 if (!irqfd.file) {
156 ret = -EBADF;
157 goto err_fd;
158 }
159
160 ctx = eventfd_ctx_fileget(irqfd.file);
161 if (IS_ERR(ctx)) {
162 ret = PTR_ERR(ctx);
163 goto err_ctx;
164 }
165
166 virqfd->eventfd = ctx;
167
168 /*
169 * virqfds can be released by closing the eventfd or directly
170 * through ioctl. These are both done through a workqueue, so
171 * we update the pointer to the virqfd under lock to avoid
172 * pushing multiple jobs to release the same virqfd.
173 */
174 spin_lock_irq(&virqfd_lock);
175
176 if (*pvirqfd) {
177 spin_unlock_irq(&virqfd_lock);
178 ret = -EBUSY;
179 goto err_busy;
180 }
181 *pvirqfd = virqfd;
182
183 spin_unlock_irq(&virqfd_lock);
184
185 /*
186 * Install our own custom wake-up handling so we are notified via
187 * a callback whenever someone signals the underlying eventfd.
188 */
189 init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
190 init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
191
192 events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
193
194 /*
195 * Check if there was an event already pending on the eventfd
196 * before we registered and trigger it as if we didn't miss it.
197 */
198 if (events & POLLIN) {
199 if ((!handler || handler(opaque, data)) && thread)
200 schedule_work(&virqfd->inject);
201 }
202
203 /*
204 * Do not drop the file until the irqfd is fully initialized,
205 * otherwise we might race against the POLLHUP.
206 */
207 fdput(irqfd);
208
209 return 0;
210err_busy:
211 eventfd_ctx_put(ctx);
212err_ctx:
213 fdput(irqfd);
214err_fd:
215 kfree(virqfd);
216
217 return ret;
218}
219EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
220
221void vfio_virqfd_disable(struct virqfd **pvirqfd)
222{
223 unsigned long flags;
224
225 spin_lock_irqsave(&virqfd_lock, flags);
226
227 if (*pvirqfd) {
228 virqfd_deactivate(*pvirqfd);
229 *pvirqfd = NULL;
230 }
231
232 spin_unlock_irqrestore(&virqfd_lock, flags);
233
234 /*
235 * Block until we know all outstanding shutdown jobs have completed.
236 * Even if we don't queue the job, flush the wq to be sure it's
237 * been released.
238 */
239 flush_workqueue(vfio_irqfd_cleanup_wq);
240}
241EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
242
243/*
244 * INTx 29 * INTx
245 */ 30 */
246static void vfio_send_intx_eventfd(void *opaque, void *unused) 31static void vfio_send_intx_eventfd(void *opaque, void *unused)
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 02539651bc3a..ae0e1b4c1711 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -87,9 +87,6 @@ extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
87extern int vfio_pci_init_perm_bits(void); 87extern int vfio_pci_init_perm_bits(void);
88extern void vfio_pci_uninit_perm_bits(void); 88extern void vfio_pci_uninit_perm_bits(void);
89 89
90extern int vfio_virqfd_init(void);
91extern void vfio_virqfd_exit(void);
92
93extern int vfio_config_init(struct vfio_pci_device *vdev); 90extern int vfio_config_init(struct vfio_pci_device *vdev);
94extern void vfio_config_free(struct vfio_pci_device *vdev); 91extern void vfio_config_free(struct vfio_pci_device *vdev);
95#endif /* VFIO_PCI_PRIVATE_H */ 92#endif /* VFIO_PCI_PRIVATE_H */
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
new file mode 100644
index 000000000000..5967899645c5
--- /dev/null
+++ b/drivers/vfio/virqfd.c
@@ -0,0 +1,213 @@
1/*
2 * VFIO generic eventfd code for IRQFD support.
3 * Derived from drivers/vfio/pci/vfio_pci_intrs.c
4 *
5 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
6 * Author: Alex Williamson <alex.williamson@redhat.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/vfio.h>
14#include <linux/eventfd.h>
15#include <linux/file.h>
16#include <linux/slab.h>
17
18static struct workqueue_struct *vfio_irqfd_cleanup_wq;
19DEFINE_SPINLOCK(virqfd_lock);
20
21int __init vfio_virqfd_init(void)
22{
23 vfio_irqfd_cleanup_wq =
24 create_singlethread_workqueue("vfio-irqfd-cleanup");
25 if (!vfio_irqfd_cleanup_wq)
26 return -ENOMEM;
27
28 return 0;
29}
30
31void vfio_virqfd_exit(void)
32{
33 destroy_workqueue(vfio_irqfd_cleanup_wq);
34}
35
36static void virqfd_deactivate(struct virqfd *virqfd)
37{
38 queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
39}
40
41static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
42{
43 struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
44 unsigned long flags = (unsigned long)key;
45
46 if (flags & POLLIN) {
47 /* An event has been signaled, call function */
48 if ((!virqfd->handler ||
49 virqfd->handler(virqfd->opaque, virqfd->data)) &&
50 virqfd->thread)
51 schedule_work(&virqfd->inject);
52 }
53
54 if (flags & POLLHUP) {
55 unsigned long flags;
56 spin_lock_irqsave(&virqfd_lock, flags);
57
58 /*
59 * The eventfd is closing, if the virqfd has not yet been
60 * queued for release, as determined by testing whether the
61 * virqfd pointer to it is still valid, queue it now. As
62 * with kvm irqfds, we know we won't race against the virqfd
63 * going away because we hold the lock to get here.
64 */
65 if (*(virqfd->pvirqfd) == virqfd) {
66 *(virqfd->pvirqfd) = NULL;
67 virqfd_deactivate(virqfd);
68 }
69
70 spin_unlock_irqrestore(&virqfd_lock, flags);
71 }
72
73 return 0;
74}
75
76static void virqfd_ptable_queue_proc(struct file *file,
77 wait_queue_head_t *wqh, poll_table *pt)
78{
79 struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
80 add_wait_queue(wqh, &virqfd->wait);
81}
82
83static void virqfd_shutdown(struct work_struct *work)
84{
85 struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
86 u64 cnt;
87
88 eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
89 flush_work(&virqfd->inject);
90 eventfd_ctx_put(virqfd->eventfd);
91
92 kfree(virqfd);
93}
94
95static void virqfd_inject(struct work_struct *work)
96{
97 struct virqfd *virqfd = container_of(work, struct virqfd, inject);
98 if (virqfd->thread)
99 virqfd->thread(virqfd->opaque, virqfd->data);
100}
101
102int vfio_virqfd_enable(void *opaque,
103 int (*handler)(void *, void *),
104 void (*thread)(void *, void *),
105 void *data, struct virqfd **pvirqfd, int fd)
106{
107 struct fd irqfd;
108 struct eventfd_ctx *ctx;
109 struct virqfd *virqfd;
110 int ret = 0;
111 unsigned int events;
112
113 virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
114 if (!virqfd)
115 return -ENOMEM;
116
117 virqfd->pvirqfd = pvirqfd;
118 virqfd->opaque = opaque;
119 virqfd->handler = handler;
120 virqfd->thread = thread;
121 virqfd->data = data;
122
123 INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
124 INIT_WORK(&virqfd->inject, virqfd_inject);
125
126 irqfd = fdget(fd);
127 if (!irqfd.file) {
128 ret = -EBADF;
129 goto err_fd;
130 }
131
132 ctx = eventfd_ctx_fileget(irqfd.file);
133 if (IS_ERR(ctx)) {
134 ret = PTR_ERR(ctx);
135 goto err_ctx;
136 }
137
138 virqfd->eventfd = ctx;
139
140 /*
141 * virqfds can be released by closing the eventfd or directly
142 * through ioctl. These are both done through a workqueue, so
143 * we update the pointer to the virqfd under lock to avoid
144 * pushing multiple jobs to release the same virqfd.
145 */
146 spin_lock_irq(&virqfd_lock);
147
148 if (*pvirqfd) {
149 spin_unlock_irq(&virqfd_lock);
150 ret = -EBUSY;
151 goto err_busy;
152 }
153 *pvirqfd = virqfd;
154
155 spin_unlock_irq(&virqfd_lock);
156
157 /*
158 * Install our own custom wake-up handling so we are notified via
159 * a callback whenever someone signals the underlying eventfd.
160 */
161 init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
162 init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
163
164 events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
165
166 /*
167 * Check if there was an event already pending on the eventfd
168 * before we registered and trigger it as if we didn't miss it.
169 */
170 if (events & POLLIN) {
171 if ((!handler || handler(opaque, data)) && thread)
172 schedule_work(&virqfd->inject);
173 }
174
175 /*
176 * Do not drop the file until the irqfd is fully initialized,
177 * otherwise we might race against the POLLHUP.
178 */
179 fdput(irqfd);
180
181 return 0;
182err_busy:
183 eventfd_ctx_put(ctx);
184err_ctx:
185 fdput(irqfd);
186err_fd:
187 kfree(virqfd);
188
189 return ret;
190}
191EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
192
193void vfio_virqfd_disable(struct virqfd **pvirqfd)
194{
195 unsigned long flags;
196
197 spin_lock_irqsave(&virqfd_lock, flags);
198
199 if (*pvirqfd) {
200 virqfd_deactivate(*pvirqfd);
201 *pvirqfd = NULL;
202 }
203
204 spin_unlock_irqrestore(&virqfd_lock, flags);
205
206 /*
207 * Block until we know all outstanding shutdown jobs have completed.
208 * Even if we don't queue the job, flush the wq to be sure it's
209 * been released.
210 */
211 flush_workqueue(vfio_irqfd_cleanup_wq);
212}
213EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 2d67b8998fd8..683b5146022e 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -14,6 +14,8 @@
14 14
15#include <linux/iommu.h> 15#include <linux/iommu.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/workqueue.h>
18#include <linux/poll.h>
17#include <uapi/linux/vfio.h> 19#include <uapi/linux/vfio.h>
18 20
19/** 21/**
@@ -123,4 +125,29 @@ static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
123 return -ENOTTY; 125 return -ENOTTY;
124} 126}
125#endif /* CONFIG_EEH */ 127#endif /* CONFIG_EEH */
128
129/*
130 * IRQfd - generic
131 */
132struct virqfd {
133 void *opaque;
134 struct eventfd_ctx *eventfd;
135 int (*handler)(void *, void *);
136 void (*thread)(void *, void *);
137 void *data;
138 struct work_struct inject;
139 wait_queue_t wait;
140 poll_table pt;
141 struct work_struct shutdown;
142 struct virqfd **pvirqfd;
143};
144
145extern int vfio_virqfd_init(void);
146extern void vfio_virqfd_exit(void);
147extern int vfio_virqfd_enable(void *opaque,
148 int (*handler)(void *, void *),
149 void (*thread)(void *, void *),
150 void *data, struct virqfd **pvirqfd, int fd);
151extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
152
126#endif /* VFIO_H */ 153#endif /* VFIO_H */