Diffstat (limited to 'drivers/virtio/virtio_ring.c')
 drivers/virtio/virtio_ring.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 51 insertions(+), 2 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index b0043fb26a4d..68b9136847af 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -82,6 +82,9 @@ struct vring_virtqueue
 	/* Host supports indirect buffers */
 	bool indirect;
 
+	/* Host publishes avail event idx */
+	bool event;
+
 	/* Number of free buffers */
 	unsigned int num_free;
 	/* Head of free buffer list. */
@@ -237,18 +240,22 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
 void virtqueue_kick(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 new, old;
 	START_USE(vq);
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
 	virtio_wmb();
 
-	vq->vring.avail->idx += vq->num_added;
+	old = vq->vring.avail->idx;
+	new = vq->vring.avail->idx = old + vq->num_added;
 	vq->num_added = 0;
 
 	/* Need to update avail index before checking if we should notify */
 	virtio_mb();
 
-	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
+	if (vq->event ?
+	    vring_need_event(vring_avail_event(&vq->vring), new, old) :
+	    !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
 		/* Prod other side to tell it about changes. */
 		vq->notify(&vq->vq);
 
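
The kick path above uses three helpers that are not defined in this file: vring_avail_event(), vring_used_event() and vring_need_event(). They are introduced by the companion include/linux/virtio_ring.h change in this series; for reference, a sketch of their shape (the event words live in the otherwise unused tail of each ring):

	#define VIRTIO_RING_F_EVENT_IDX		29

	/* Driver writes here which used index should trigger an interrupt. */
	#define vring_used_event(vr)  ((vr)->avail->ring[(vr)->num])
	/* Device writes here which avail index should trigger a kick. */
	#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])

	/* True iff event_idx lies in the window [old, new_idx) of entries
	 * published by the move from old to new_idx, modulo 2^16. */
	static inline int vring_need_event(__u16 event_idx, __u16 new_idx,
					   __u16 old)
	{
		return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
	}
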
@@ -324,6 +331,14 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	ret = vq->data[i];
 	detach_buf(vq, i);
 	vq->last_used_idx++;
+	/* If we expect an interrupt for the next entry, tell host
+	 * by writing event index and flush out the write before
+	 * the read in the next get_buf call. */
+	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
+		vring_used_event(&vq->vring) = vq->last_used_idx;
+		virtio_mb();
+	}
+
 	END_USE(vq);
 	return ret;
 }
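
The write to vring_used_event() above is what the host tests with vring_need_event() before injecting the next interrupt, and the comparison is deliberately written in wrap-safe 16-bit arithmetic. A small standalone check of that property, with hypothetical index values (userspace build, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	typedef uint16_t __u16;

	/* Same comparison as the kernel helper: true iff event_idx lies in
	 * the half-open window [old, new_idx) of freshly published entries. */
	static inline int vring_need_event(__u16 event_idx, __u16 new_idx,
					   __u16 old)
	{
		return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
	}

	int main(void)
	{
		/* Index moved 0xfffe -> 0x0001 across the 16-bit wrap; the
		 * event at 0xffff sits inside the window, so notify. */
		assert(vring_need_event(0xffff, 0x0001, 0xfffe));

		/* Entries 3 and 4 were published; the event at 4 was hit. */
		assert(vring_need_event(4, 5, 3));

		/* The event at 5 has not been reached yet: stay quiet. */
		assert(!vring_need_event(5, 5, 3));
		return 0;
	}
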
@@ -345,7 +360,11 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 
 	/* We optimistically turn back on interrupts, then check if there was
 	 * more to do. */
+	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+	 * either clear the flags bit or point the event index at the next
+	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	vring_used_event(&vq->vring) = vq->last_used_idx;
 	virtio_mb();
 	if (unlikely(more_used(vq))) {
 		END_USE(vq);
@@ -357,6 +376,33 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
+bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 bufs;
+
+	START_USE(vq);
+
+	/* We optimistically turn back on interrupts, then check if there was
+	 * more to do. */
+	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+	 * either clear the flags bit or point the event index at the next
+	 * entry. Always do both to keep code simple. */
+	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	/* TODO: tune this threshold */
+	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
+	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
+	virtio_mb();
+	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
+		END_USE(vq);
+		return false;
+	}
+
+	END_USE(vq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+
 void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
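
virtqueue_enable_cb_delayed() only pays off if callers re-poll when it reports pending work. A hypothetical driver-side pattern (free_old_buffers() is an assumed helper, not a kernel API, that reaps completions with virtqueue_get_buf()):

	#include <linux/virtio.h>

	/* Assumed helper: loops over virtqueue_get_buf() freeing completions. */
	extern void free_old_buffers(struct virtqueue *vq);

	static void reap_then_rearm(struct virtqueue *vq)
	{
		/* enable_cb_delayed() returns false when the used ring has
		 * already advanced past the 3/4 threshold: reap again instead
		 * of waiting for an interrupt that may never be this early. */
		do {
			free_old_buffers(vq);
		} while (!virtqueue_enable_cb_delayed(vq));
	}
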
@@ -438,6 +484,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 #endif
 
 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
 	/* No callback? Tell other side not to bother us. */
 	if (!callback)
@@ -472,6 +519,8 @@ void vring_transport_features(struct virtio_device *vdev)
 		switch (i) {
 		case VIRTIO_RING_F_INDIRECT_DESC:
 			break;
+		case VIRTIO_RING_F_EVENT_IDX:
+			break;
 		default:
 			/* We don't understand this bit. */
 			clear_bit(i, vdev->features);
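
For completeness, the other half of the protocol: once VIRTIO_RING_F_EVENT_IDX is negotiated, the device applies the mirror-image test before raising an interrupt. A hypothetical device-side sketch (device_needs_interrupt() is illustrative only), assuming the helpers shown earlier:

	#include <linux/virtio_ring.h>

	/* Hypothetical device-side check, the mirror of virtqueue_kick():
	 * called after the device moved used->idx from old to new. */
	static bool device_needs_interrupt(const struct vring *vr,
					   bool event_idx, u16 new, u16 old)
	{
		if (event_idx)
			return vring_need_event(vring_used_event(vr), new, old);
		return !(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
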