aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 drivers/net/virtio_net.c     |  5 +++--
 drivers/virtio/virtio_ring.c | 56 ++++++++++++++++++++++++++++++----------
 include/linux/virtio.h       |  4 ++++
 3 files changed, 51 insertions(+), 14 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c9e00387d999..42d670a468f8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -602,7 +602,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 		container_of(napi, struct receive_queue, napi);
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	void *buf;
-	unsigned int len, received = 0;
+	unsigned int r, len, received = 0;
 
 again:
 	while (received < budget &&
@@ -619,8 +619,9 @@ again:
 
 	/* Out of packets? */
 	if (received < budget) {
+		r = virtqueue_enable_cb_prepare(rq->vq);
 		napi_complete(napi);
-		if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
+		if (unlikely(virtqueue_poll(rq->vq, r)) &&
 		    napi_schedule_prep(napi)) {
 			virtqueue_disable_cb(rq->vq);
 			__napi_schedule(napi);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5217baf5528c..37d58f84dc50 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
  * @vq: the struct virtqueue we're talking about.
  *
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
  *
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
 */
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 last_used_idx;
 
 	START_USE(vq);
 
@@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = vq->last_used_idx;
+	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+	END_USE(vq);
+	return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
 	virtio_mb(vq->weak_barriers);
-	if (unlikely(more_used(vq))) {
-		END_USE(vq);
-		return false;
-	}
+	return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
 
-	END_USE(vq);
-	return true;
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb(struct virtqueue *_vq)
+{
+	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+	return !virtqueue_poll(_vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 9ff8645b7e0b..72398eea6e86 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -70,6 +70,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
 
 bool virtqueue_enable_cb(struct virtqueue *vq);
 
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+
+bool virtqueue_poll(struct virtqueue *vq, unsigned);
+
 bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
 
 void *virtqueue_detach_unused_buf(struct virtqueue *vq);