 drivers/virtio/virtio_ring.c |   27 +++++++++++++++++++++++++++
 include/linux/virtio.h       |    9 +++++++++
 2 files changed, 36 insertions(+), 0 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a5fadc40c448..68b9136847af 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -376,6 +376,33 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
+bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 bufs;
+
+	START_USE(vq);
+
+	/* We optimistically turn back on interrupts, then check if there was
+	 * more to do. */
+	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
+	 * either clear the flags bit or point the event index at the next
+	 * entry. Always do both to keep code simple. */
+	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	/* TODO: tune this threshold */
+	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
+	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
+	virtio_mb();
+	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
+		END_USE(vq);
+		return false;
+	}
+
+	END_USE(vq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+
 void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
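As a worked illustration of the threshold above (not part of the patch itself): if 16 buffers are outstanding when the function runs, i.e. avail->idx - last_used_idx == 16, then bufs = 16 * 3 / 4 = 12, so vring_used_event is pointed 12 entries past last_used_idx and a device that honours the event index will hold off its interrupt until it has consumed roughly 12 of those buffers. If, after the memory barrier, used->idx shows the device has already advanced more than 12 entries, the function returns false so the caller can go back and process the completed buffers instead of waiting. The u16 casts keep both the threshold and the comparison correct when the free-running indices wrap past 65535.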
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index aff5b4f74041..710885749605 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,6 +51,13 @@ struct virtqueue {
  * This re-enables callbacks; it returns "false" if there are pending
  * buffers in the queue, to detect a possible race between the driver
  * checking for more work, and enabling callbacks.
+ * virtqueue_enable_cb_delayed: restart callbacks after disable_cb.
+ * vq: the struct virtqueue we're talking about.
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
 * virtqueue_detach_unused_buf: detach first unused buffer
 * vq: the struct virtqueue we're talking about.
 * Returns NULL or the "data" token handed to add_buf
@@ -86,6 +93,8 @@ void virtqueue_disable_cb(struct virtqueue *vq);
 
 bool virtqueue_enable_cb(struct virtqueue *vq);
 
+bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+
 void *virtqueue_detach_unused_buf(struct virtqueue *vq);
 
 /**
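For context, here is a minimal sketch of how a driver might consume the new entry point. This is an assumption for illustration only, not code from this patch: the my_dev structure, its tx_vq field, my_free_old_bufs() and the kfree() of the data tokens are hypothetical, while virtqueue_disable_cb(), virtqueue_get_buf() and the new virtqueue_enable_cb_delayed() are the virtio entry points declared above.

#include <linux/slab.h>
#include <linux/virtio.h>

/* Hypothetical driver state; only the virtqueue pointer matters here. */
struct my_dev {
	struct virtqueue *tx_vq;
};

/*
 * Illustrative only: drain completed buffers with callbacks disabled,
 * then re-enable them with the delayed hint.  A "false" return from
 * virtqueue_enable_cb_delayed() means many buffers completed in the
 * meantime, so drain again rather than waiting for an interrupt that
 * has been pushed well into the future.
 */
static void my_free_old_bufs(struct my_dev *dev)
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(dev->tx_vq);
		while ((buf = virtqueue_get_buf(dev->tx_vq, &len)) != NULL)
			kfree(buf);	/* assumes kmalloc()ed data tokens */
	} while (!virtqueue_enable_cb_delayed(dev->tx_vq));
}

The loop mirrors the contract documented in the header hunk: a "false" return is treated as "more work pending", so the driver keeps polling instead of sleeping behind a deliberately delayed interrupt.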