Diffstat (limited to 'drivers/vhost/vhost.c')
-rw-r--r--  drivers/vhost/vhost.c | 52
1 file changed, 7 insertions(+), 45 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index dedaf81d8f36..5a3d0f1eaf94 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -26,10 +26,6 @@
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
 
-#include <linux/net.h>
-#include <linux/if_packet.h>
-#include <linux/if_arp.h>
-
 #include "vhost.h"
 
 enum {
@@ -414,28 +410,16 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
 	return 0;
 }
 
-/* In case of DMA done not in order in lower device driver for some reason.
- * upend_idx is used to track end of used idx, done_idx is used to track head
- * of used idx. Once lower device DMA done contiguously, we will signal KVM
- * guest used idx.
- */
-int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
+void vhost_dev_stop(struct vhost_dev *dev)
 {
 	int i;
-	int j = 0;
-
-	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
-		if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
-			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
-			vhost_add_used_and_signal(vq->dev, vq,
-						  vq->heads[i].id, 0);
-			++j;
-		} else
-			break;
+
+	for (i = 0; i < dev->nvqs; ++i) {
+		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
+			vhost_poll_stop(&dev->vqs[i].poll);
+			vhost_poll_flush(&dev->vqs[i].poll);
+		}
 	}
-	if (j)
-		vq->done_idx = i;
-	return j;
 }
 
 /* Caller should have device mutex if and only if locked is set */
@@ -444,17 +428,6 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
-			vhost_poll_stop(&dev->vqs[i].poll);
-			vhost_poll_flush(&dev->vqs[i].poll);
-		}
-		/* Wait for all lower device DMAs done. */
-		if (dev->vqs[i].ubufs)
-			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
-
-		/* Signal guest as appropriate. */
-		vhost_zerocopy_signal_used(&dev->vqs[i]);
-
 		if (dev->vqs[i].error_ctx)
 			eventfd_ctx_put(dev->vqs[i].error_ctx);
 		if (dev->vqs[i].error)
@@ -1599,14 +1572,3 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
 	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
 	kfree(ubufs);
 }
-
-void vhost_zerocopy_callback(struct ubuf_info *ubuf)
-{
-	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
-	struct vhost_virtqueue *vq = ubufs->vq;
-
-	vhost_poll_queue(&vq->poll);
-	/* set len = 1 to mark this desc buffers done DMA */
-	vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
-	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
-}
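
With vhost_zerocopy_signal_used() and vhost_zerocopy_callback() removed from the core, the generic cleanup path no longer stops the pollers or waits for zerocopy DMA; a vhost client is expected to do that itself and call the new vhost_dev_stop() before vhost_dev_cleanup(). Below is a minimal caller-side sketch of that ordering. The client struct and the drain helper are illustrative assumptions only; the companion -net change is not part of this diff.

#include "vhost.h"	/* struct vhost_dev, vhost_dev_stop(), vhost_dev_cleanup() */

struct my_vhost_client {		/* hypothetical client, stands in for vhost-net */
	struct vhost_dev dev;
	/* ... client-specific zerocopy bookkeeping ... */
};

/* Hypothetical helper: the zerocopy draining that vhost_dev_cleanup() used to
 * do, i.e. the client's equivalents of vhost_ubuf_put_and_wait() and
 * vhost_zerocopy_signal_used(), now belongs on the client side. */
static void my_client_drain_zerocopy(struct my_vhost_client *c)
{
	/* Wait for outstanding lower-device DMA and signal the guest for
	 * completed buffers here. */
}

static void my_client_release(struct my_vhost_client *c)
{
	/* 1. Stop and flush the virtqueue pollers via the new core helper. */
	vhost_dev_stop(&c->dev);

	/* 2. Client-specific zerocopy teardown; the core no longer does this. */
	my_client_drain_zerocopy(c);

	/* 3. Generic teardown; false == caller does not hold the device mutex
	 *    (per the "device mutex if and only if locked" comment above). */
	vhost_dev_cleanup(&c->dev, false);
}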