author      Tejun Heo <tj@kernel.org>              2010-06-02 14:40:00 -0400
committer   Michael S. Tsirkin <mst@redhat.com>    2010-07-28 08:44:53 -0400
commit      c23f3445e68e1db0e74099f264bc5ff5d55ebdeb
tree        0a8e22e9a10c2978777954a022d721eb02e622be /drivers/vhost
parent      4cfa580e7eebb8694b875d2caff3b989ada2efac
vhost: replace vhost_workqueue with per-vhost kthread
Replace vhost_workqueue with a per-vhost kthread. Other than the callback
argument changing from struct work_struct * to struct vhost_work *,
there is no visible change to the vhost_poll_*() interface.
This conversion gives each vhost device a dedicated kthread so that
resource control via cgroups can be applied.
Partially based on Sridhar Samudrala's patch.
* Updated to use a vhost_work sub-structure instead of using vhost_poll
directly, at Michael's suggestion.
* Added a flusher wake_up() optimization at Michael's suggestion.
Changes by MST:
* Converted atomics/barrier use to a spinlock.
* Create thread on SET_OWNER
* Fix flushing
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Cc: Sridhar Samudrala <samudrala.sridhar@gmail.com>
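
For readers skimming the diff below, here is a rough userspace analogue of the per-device worker pattern this patch introduces: one dedicated thread per device drains a list of queued work items, so all handlers for a device run on that device's own thread. This is an illustrative sketch only; the names (worker_dev, queue_work_on_dev, say_hello) are hypothetical, and the real kernel code uses a kthread, dev->work_lock and wake_up_process() rather than pthreads.

/*
 * Illustrative userspace sketch of a per-device worker thread.
 * Not the kernel implementation -- names and scaffolding are invented.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct work_item {
	struct work_item *next;
	void (*fn)(struct work_item *work);	/* plays the role of vhost_work_fn_t */
};

struct worker_dev {
	pthread_mutex_t lock;			/* plays the role of dev->work_lock */
	pthread_cond_t kick;
	struct work_item *head, *tail;		/* plays the role of dev->work_list */
	bool stop;
};

/* Dedicated worker thread: everything queued to this device runs here. */
static void *worker_fn(void *data)
{
	struct worker_dev *dev = data;

	pthread_mutex_lock(&dev->lock);
	for (;;) {
		while (!dev->head && !dev->stop)
			pthread_cond_wait(&dev->kick, &dev->lock);
		if (!dev->head && dev->stop)
			break;
		struct work_item *w = dev->head;
		dev->head = w->next;
		if (!dev->head)
			dev->tail = NULL;
		pthread_mutex_unlock(&dev->lock);
		w->fn(w);			/* run the handler outside the lock */
		pthread_mutex_lock(&dev->lock);
	}
	pthread_mutex_unlock(&dev->lock);
	return NULL;
}

/* Analogue of vhost_poll_queue(): append the item and wake the device's worker. */
static void queue_work_on_dev(struct worker_dev *dev, struct work_item *w)
{
	pthread_mutex_lock(&dev->lock);
	w->next = NULL;
	if (dev->tail)
		dev->tail->next = w;
	else
		dev->head = w;
	dev->tail = w;
	pthread_cond_signal(&dev->kick);
	pthread_mutex_unlock(&dev->lock);
}

static void say_hello(struct work_item *w)
{
	(void)w;
	puts("work ran on the device's own worker thread");
}

int main(void)
{
	struct worker_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER,
				  .kick = PTHREAD_COND_INITIALIZER };
	struct work_item w = { .fn = say_hello };
	pthread_t thr;

	pthread_create(&thr, NULL, worker_fn, &dev);
	queue_work_on_dev(&dev, &w);

	pthread_mutex_lock(&dev.lock);
	dev.stop = true;		/* worker drains remaining work, then exits */
	pthread_cond_signal(&dev.kick);
	pthread_mutex_unlock(&dev.lock);
	pthread_join(thr, NULL);
	return 0;
}

In the actual patch, flushing is done with queue_seq/done_seq counters under work_lock rather than by joining the thread; see vhost_poll_flush() and vhost_worker() in the diff below.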
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/net.c    |  56
-rw-r--r--  drivers/vhost/vhost.c  | 143
-rw-r--r--  drivers/vhost/vhost.h  |  38
3 files changed, 164 insertions, 73 deletions
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f11e6bb5b036..d395b59289ae 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -302,54 +302,58 @@ static void handle_rx(struct vhost_net *net)
 	unuse_mm(net->dev.mm);
 }
 
-static void handle_tx_kick(struct work_struct *work)
+static void handle_tx_kick(struct vhost_work *work)
 {
-	struct vhost_virtqueue *vq;
-	struct vhost_net *net;
-	vq = container_of(work, struct vhost_virtqueue, poll.work);
-	net = container_of(vq->dev, struct vhost_net, dev);
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
+
 	handle_tx(net);
 }
 
-static void handle_rx_kick(struct work_struct *work)
+static void handle_rx_kick(struct vhost_work *work)
 {
-	struct vhost_virtqueue *vq;
-	struct vhost_net *net;
-	vq = container_of(work, struct vhost_virtqueue, poll.work);
-	net = container_of(vq->dev, struct vhost_net, dev);
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
+
 	handle_rx(net);
 }
 
-static void handle_tx_net(struct work_struct *work)
+static void handle_tx_net(struct vhost_work *work)
 {
-	struct vhost_net *net;
-	net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
+	struct vhost_net *net = container_of(work, struct vhost_net,
+					     poll[VHOST_NET_VQ_TX].work);
 	handle_tx(net);
 }
 
-static void handle_rx_net(struct work_struct *work)
+static void handle_rx_net(struct vhost_work *work)
 {
-	struct vhost_net *net;
-	net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
+	struct vhost_net *net = container_of(work, struct vhost_net,
+					     poll[VHOST_NET_VQ_RX].work);
 	handle_rx(net);
 }
 
 static int vhost_net_open(struct inode *inode, struct file *f)
 {
 	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
+	struct vhost_dev *dev;
 	int r;
+
 	if (!n)
 		return -ENOMEM;
+
+	dev = &n->dev;
 	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
 	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
-	r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
+	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
 	if (r < 0) {
 		kfree(n);
 		return r;
 	}
 
-	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
-	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
+	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
+	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
 	n->tx_poll_state = VHOST_NET_POLL_DISABLED;
 
 	f->private_data = n;
@@ -656,25 +660,13 @@ static struct miscdevice vhost_net_misc = {
 
 static int vhost_net_init(void)
 {
-	int r = vhost_init();
-	if (r)
-		goto err_init;
-	r = misc_register(&vhost_net_misc);
-	if (r)
-		goto err_reg;
-	return 0;
-err_reg:
-	vhost_cleanup();
-err_init:
-	return r;
-
+	return misc_register(&vhost_net_misc);
 }
 module_init(vhost_net_init);
 
 static void vhost_net_exit(void)
 {
 	misc_deregister(&vhost_net_misc);
-	vhost_cleanup();
 }
 module_exit(vhost_net_exit);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 248ed2db0711..30d93c2b45b8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -17,12 +17,12 @@
 #include <linux/mm.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/rcupdate.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
 
 #include <linux/net.h>
 #include <linux/if_packet.h>
@@ -37,8 +37,6 @@ enum {
 	VHOST_MEMORY_F_LOG = 0x1,
 };
 
-static struct workqueue_struct *vhost_workqueue;
-
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 			    poll_table *pt)
 {
@@ -52,23 +50,31 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 			     void *key)
 {
-	struct vhost_poll *poll;
-	poll = container_of(wait, struct vhost_poll, wait);
+	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+
 	if (!((unsigned long)key & poll->mask))
 		return 0;
 
-	queue_work(vhost_workqueue, &poll->work);
+	vhost_poll_queue(poll);
 	return 0;
 }
 
 /* Init poll structure */
-void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
-		     unsigned long mask)
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+		     unsigned long mask, struct vhost_dev *dev)
 {
-	INIT_WORK(&poll->work, func);
+	struct vhost_work *work = &poll->work;
+
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
+	poll->dev = dev;
+
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -92,12 +98,40 @@ void vhost_poll_stop(struct vhost_poll *poll)
  * locks that are also used by the callback. */
 void vhost_poll_flush(struct vhost_poll *poll)
 {
-	flush_work(&poll->work);
+	struct vhost_work *work = &poll->work;
+	unsigned seq;
+	int left;
+	int flushing;
+
+	spin_lock_irq(&poll->dev->work_lock);
+	seq = work->queue_seq;
+	work->flushing++;
+	spin_unlock_irq(&poll->dev->work_lock);
+	wait_event(work->done, ({
+		   spin_lock_irq(&poll->dev->work_lock);
+		   left = seq - work->done_seq <= 0;
+		   spin_unlock_irq(&poll->dev->work_lock);
+		   left;
+	}));
+	spin_lock_irq(&poll->dev->work_lock);
+	flushing = --work->flushing;
+	spin_unlock_irq(&poll->dev->work_lock);
+	BUG_ON(flushing < 0);
 }
 
 void vhost_poll_queue(struct vhost_poll *poll)
 {
-	queue_work(vhost_workqueue, &poll->work);
+	struct vhost_dev *dev = poll->dev;
+	struct vhost_work *work = &poll->work;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->work_lock, flags);
+	if (list_empty(&work->node)) {
+		list_add_tail(&work->node, &dev->work_list);
+		work->queue_seq++;
+		wake_up_process(dev->worker);
+	}
+	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -125,10 +159,51 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->log_ctx = NULL;
 }
 
+static int vhost_worker(void *data)
+{
+	struct vhost_dev *dev = data;
+	struct vhost_work *work = NULL;
+	unsigned uninitialized_var(seq);
+
+	for (;;) {
+		/* mb paired w/ kthread_stop */
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_irq(&dev->work_lock);
+		if (work) {
+			work->done_seq = seq;
+			if (work->flushing)
+				wake_up_all(&work->done);
+		}
+
+		if (kthread_should_stop()) {
+			spin_unlock_irq(&dev->work_lock);
+			__set_current_state(TASK_RUNNING);
+			return 0;
+		}
+		if (!list_empty(&dev->work_list)) {
+			work = list_first_entry(&dev->work_list,
+						struct vhost_work, node);
+			list_del_init(&work->node);
+			seq = work->queue_seq;
+		} else
+			work = NULL;
+		spin_unlock_irq(&dev->work_lock);
+
+		if (work) {
+			__set_current_state(TASK_RUNNING);
+			work->fn(work);
+		} else
+			schedule();
+
+	}
+}
+
 long vhost_dev_init(struct vhost_dev *dev,
 		    struct vhost_virtqueue *vqs, int nvqs)
 {
 	int i;
+
 	dev->vqs = vqs;
 	dev->nvqs = nvqs;
 	mutex_init(&dev->mutex);
@@ -136,6 +211,9 @@ long vhost_dev_init(struct vhost_dev *dev,
 	dev->log_file = NULL;
 	dev->memory = NULL;
 	dev->mm = NULL;
+	spin_lock_init(&dev->work_lock);
+	INIT_LIST_HEAD(&dev->work_list);
+	dev->worker = NULL;
 
 	for (i = 0; i < dev->nvqs; ++i) {
 		dev->vqs[i].dev = dev;
@@ -143,9 +221,9 @@ long vhost_dev_init(struct vhost_dev *dev,
 		vhost_vq_reset(dev, dev->vqs + i);
 		if (dev->vqs[i].handle_kick)
 			vhost_poll_init(&dev->vqs[i].poll,
-					dev->vqs[i].handle_kick,
-					POLLIN);
+					dev->vqs[i].handle_kick, POLLIN, dev);
 	}
+
 	return 0;
 }
 
@@ -159,12 +237,31 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
+	struct task_struct *worker;
+	int err;
 	/* Is there an owner already? */
-	if (dev->mm)
-		return -EBUSY;
+	if (dev->mm) {
+		err = -EBUSY;
+		goto err_mm;
+	}
 	/* No owner, become one */
 	dev->mm = get_task_mm(current);
+	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
+	if (IS_ERR(worker)) {
+		err = PTR_ERR(worker);
+		goto err_worker;
+	}
+
+	dev->worker = worker;
+	wake_up_process(worker);	/* avoid contributing to loadavg */
+
 	return 0;
+err_worker:
+	if (dev->mm)
+		mmput(dev->mm);
+	dev->mm = NULL;
+err_mm:
+	return err;
 }
 
 /* Caller should have device mutex */
@@ -217,6 +314,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 	if (dev->mm)
 		mmput(dev->mm);
 	dev->mm = NULL;
+
+	WARN_ON(!list_empty(&dev->work_list));
+	kthread_stop(dev->worker);
 }
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
@@ -1115,16 +1215,3 @@ void vhost_disable_notify(struct vhost_virtqueue *vq)
 		vq_err(vq, "Failed to enable notification at %p: %d\n",
 		       &vq->used->flags, r);
 }
-
-int vhost_init(void)
-{
-	vhost_workqueue = create_singlethread_workqueue("vhost");
-	if (!vhost_workqueue)
-		return -ENOMEM;
-	return 0;
-}
-
-void vhost_cleanup(void)
-{
-	destroy_workqueue(vhost_workqueue);
-}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 11ee13dba0f7..3693327549b3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -5,13 +5,13 @@
 #include <linux/vhost.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/skbuff.h>
 #include <linux/uio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
+#include <asm/atomic.h>
 
 struct vhost_device;
 
@@ -20,19 +20,31 @@ enum {
 	VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
 };
 
+struct vhost_work;
+typedef void (*vhost_work_fn_t)(struct vhost_work *work);
+
+struct vhost_work {
+	struct list_head	node;
+	vhost_work_fn_t		fn;
+	wait_queue_head_t	done;
+	int			flushing;
+	unsigned		queue_seq;
+	unsigned		done_seq;
+};
+
 /* Poll a file (eventfd or socket) */
 /* Note: there's nothing vhost specific about this structure. */
 struct vhost_poll {
 	poll_table		table;
 	wait_queue_head_t	*wqh;
 	wait_queue_t		wait;
-	/* struct which will handle all actual work. */
-	struct work_struct	work;
+	struct vhost_work	work;
 	unsigned long		mask;
+	struct vhost_dev	*dev;
 };
 
-void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
-		     unsigned long mask);
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+		     unsigned long mask, struct vhost_dev *dev);
 void vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
@@ -63,7 +75,7 @@ struct vhost_virtqueue {
 	struct vhost_poll poll;
 
 	/* The routine to call when the Guest pings us, or timeout. */
-	work_func_t handle_kick;
+	vhost_work_fn_t handle_kick;
 
 	/* Last available index we saw. */
 	u16 last_avail_idx;
@@ -86,11 +98,11 @@ struct vhost_virtqueue {
 	struct iovec hdr[VHOST_NET_MAX_SG];
 	size_t hdr_size;
 	/* We use a kind of RCU to access private pointer.
-	 * All readers access it from workqueue, which makes it possible to
-	 * flush the workqueue instead of synchronize_rcu. Therefore readers do
+	 * All readers access it from worker, which makes it possible to
+	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
 	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
-	 * work item execution acts instead of rcu_read_lock() and the end of
-	 * work item execution acts instead of rcu_read_lock().
+	 * vhost_work execution acts instead of rcu_read_lock() and the end of
+	 * vhost_work execution acts instead of rcu_read_lock().
 	 * Writers use virtqueue mutex. */
 	void *private_data;
 	/* Log write descriptors */
@@ -110,6 +122,9 @@ struct vhost_dev {
 	int nvqs;
 	struct file *log_file;
 	struct eventfd_ctx *log_ctx;
+	spinlock_t work_lock;
+	struct list_head work_list;
+	struct task_struct *worker;
 };
 
 long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
@@ -136,9 +151,6 @@ bool vhost_enable_notify(struct vhost_virtqueue *);
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		    unsigned int log_num, u64 len);
 
-int vhost_init(void);
-void vhost_cleanup(void);
-
 #define vq_err(vq, fmt, ...) do {                                  \
 		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
 		if ((vq)->error_ctx)                               \