author		Jason Wang <jasowang@redhat.com>	2016-04-25 22:14:32 -0400
committer	Michael S. Tsirkin <mst@redhat.com>	2016-08-01 14:44:50 -0400
commit		7235acdb1144460d9f520f0d931f3cbb79eb244c (patch)
tree		3dfa5687299d67780a5b8416303e37c028d5c8e0
parent		523d939ef98fd712632d93a5a2b588e477a7565e (diff)
vhost: simplify work flushing
We used to implement work flushing by tracking a queued seq, a done seq, and the number of in-flight flushes. This patch simplifies that by implementing work flushing as just another kind of vhost work that signals a completion when it runs. This will be used by the lockless enqueuing patch.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
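The pattern is easy to see outside the kernel. Below is a minimal userspace analogue of the completion-based flush, using pthreads and a POSIX semaphore in place of the kernel's struct completion; the names (worker_dev, work_queue, work_flush, flush_fn) are invented for this sketch and are not vhost API. The correctness argument is the same one the patch relies on: the worker drains its queue in FIFO order, so by the time the sentinel flush work runs, every work item queued before the flush has already executed.

/* Userspace sketch of flush-via-sentinel-work. Illustrative only. */
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stddef.h>

struct work {
	struct work *next;
	void (*fn)(struct work *w);
};

struct worker_dev {
	pthread_mutex_t lock;       /* stands in for dev->work_lock */
	pthread_cond_t kick;        /* stands in for wake_up_process() */
	struct work *head, *tail;   /* FIFO list, like dev->work_list */
	bool stop;
};

/* Sentinel that embeds its completion, like struct vhost_flush_struct. */
struct flush_work {
	struct work work;
	sem_t done;                 /* stands in for struct completion */
};

static void flush_fn(struct work *w)
{
	/* container_of() by hand: 'work' is the first member. */
	struct flush_work *f = (struct flush_work *)w;
	sem_post(&f->done);         /* complete(&s->wait_event) */
}

static void work_queue(struct worker_dev *dev, struct work *w)
{
	pthread_mutex_lock(&dev->lock);
	w->next = NULL;
	if (dev->tail)
		dev->tail->next = w;
	else
		dev->head = w;
	dev->tail = w;
	pthread_cond_signal(&dev->kick);
	pthread_mutex_unlock(&dev->lock);
}

/* Queue a sentinel and wait for it: everything queued earlier is done. */
static void work_flush(struct worker_dev *dev)
{
	struct flush_work f = { .work = { .next = NULL, .fn = flush_fn } };

	sem_init(&f.done, 0, 0);
	work_queue(dev, &f.work);
	sem_wait(&f.done);          /* wait_for_completion(&flush.wait_event) */
	sem_destroy(&f.done);
}

static void *worker(void *data)
{
	struct worker_dev *dev = data;

	for (;;) {
		struct work *w;

		pthread_mutex_lock(&dev->lock);
		while (!dev->head && !dev->stop)
			pthread_cond_wait(&dev->kick, &dev->lock);
		w = dev->head;
		if (w) {
			dev->head = w->next;
			if (!dev->head)
				dev->tail = NULL;
		}
		pthread_mutex_unlock(&dev->lock);

		if (!w)
			return NULL;        /* stop requested, list drained */
		w->fn(w);                   /* run work outside the lock */
	}
}

Any thread can then call work_flush(&dev) to wait for everything it queued earlier. Compare vhost_work_flush() in the patch below, which additionally skips the wait entirely when no worker thread exists (the if (dev->worker) check).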
 drivers/vhost/vhost.c | 53 +++++++++++++++++++++--------------------------------
 1 file changed, 21 insertions(+), 32 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 669fef1e2bb6..73dd16d0f587 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -131,6 +131,19 @@ static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 	vq->is_le = virtio_legacy_is_little_endian();
 }
 
+struct vhost_flush_struct {
+	struct vhost_work work;
+	struct completion wait_event;
+};
+
+static void vhost_flush_work(struct vhost_work *work)
+{
+	struct vhost_flush_struct *s;
+
+	s = container_of(work, struct vhost_flush_struct, work);
+	complete(&s->wait_event);
+}
+
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 			    poll_table *pt)
 {
@@ -158,8 +171,6 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 	INIT_LIST_HEAD(&work->node);
 	work->fn = fn;
 	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
 }
 EXPORT_SYMBOL_GPL(vhost_work_init);
 
@@ -211,31 +222,17 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
-				unsigned seq)
-{
-	int left;
-
-	spin_lock_irq(&dev->work_lock);
-	left = seq - work->done_seq;
-	spin_unlock_irq(&dev->work_lock);
-	return left <= 0;
-}
-
 void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	unsigned seq;
-	int flushing;
+	struct vhost_flush_struct flush;
+
+	if (dev->worker) {
+		init_completion(&flush.wait_event);
+		vhost_work_init(&flush.work, vhost_flush_work);
 
-	spin_lock_irq(&dev->work_lock);
-	seq = work->queue_seq;
-	work->flushing++;
-	spin_unlock_irq(&dev->work_lock);
-	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
-	spin_lock_irq(&dev->work_lock);
-	flushing = --work->flushing;
-	spin_unlock_irq(&dev->work_lock);
-	BUG_ON(flushing < 0);
+		vhost_work_queue(dev, &flush.work);
+		wait_for_completion(&flush.wait_event);
+	}
 }
 EXPORT_SYMBOL_GPL(vhost_work_flush);
 
@@ -254,7 +251,6 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 	spin_lock_irqsave(&dev->work_lock, flags);
 	if (list_empty(&work->node)) {
 		list_add_tail(&work->node, &dev->work_list);
-		work->queue_seq++;
 		spin_unlock_irqrestore(&dev->work_lock, flags);
 		wake_up_process(dev->worker);
 	} else {
@@ -310,7 +306,6 @@ static int vhost_worker(void *data)
 {
 	struct vhost_dev *dev = data;
 	struct vhost_work *work = NULL;
-	unsigned uninitialized_var(seq);
 	mm_segment_t oldfs = get_fs();
 
 	set_fs(USER_DS);
@@ -321,11 +316,6 @@ static int vhost_worker(void *data)
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		spin_lock_irq(&dev->work_lock);
-		if (work) {
-			work->done_seq = seq;
-			if (work->flushing)
-				wake_up_all(&work->done);
-		}
 
 		if (kthread_should_stop()) {
 			spin_unlock_irq(&dev->work_lock);
@@ -336,7 +326,6 @@ static int vhost_worker(void *data)
 			work = list_first_entry(&dev->work_list,
 						struct vhost_work, node);
 			list_del_init(&work->node);
-			seq = work->queue_seq;
 		} else
 			work = NULL;
 		spin_unlock_irq(&dev->work_lock);