aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael S. Tsirkin <mst@redhat.com>2017-02-06 23:15:13 -0500
committerMichael S. Tsirkin <mst@redhat.com>2017-05-02 16:41:43 -0400
commit5a08b04f637921e44ba767c07c74b0535504ab71 (patch)
tree41123464f2875505de9989115a1104ceb974d589
parent0a12ae4024a5167c30444d722b0cbafbdb5f4b57 (diff)
virtio: allow extra context per descriptor
Allow extra context per descriptor. To avoid slow down for data path, this disables use of indirect descriptors for this vq.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r--drivers/virtio/virtio_ring.c70
-rw-r--r--include/linux/virtio.h9
2 files changed, 66 insertions(+), 13 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index b23b5fae468b..5e1b548828e6 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -263,6 +263,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
263 unsigned int out_sgs, 263 unsigned int out_sgs,
264 unsigned int in_sgs, 264 unsigned int in_sgs,
265 void *data, 265 void *data,
266 void *ctx,
266 gfp_t gfp) 267 gfp_t gfp)
267{ 268{
268 struct vring_virtqueue *vq = to_vvq(_vq); 269 struct vring_virtqueue *vq = to_vvq(_vq);
@@ -275,6 +276,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
275 START_USE(vq); 276 START_USE(vq);
276 277
277 BUG_ON(data == NULL); 278 BUG_ON(data == NULL);
279 BUG_ON(ctx && vq->indirect);
278 280
279 if (unlikely(vq->broken)) { 281 if (unlikely(vq->broken)) {
280 END_USE(vq); 282 END_USE(vq);
@@ -389,6 +391,8 @@ static inline int virtqueue_add(struct virtqueue *_vq,
389 vq->desc_state[head].data = data; 391 vq->desc_state[head].data = data;
390 if (indirect) 392 if (indirect)
391 vq->desc_state[head].indir_desc = desc; 393 vq->desc_state[head].indir_desc = desc;
394 if (ctx)
395 vq->desc_state[head].indir_desc = ctx;
392 396
393 /* Put entry in available array (but don't update avail->idx until they 397 /* Put entry in available array (but don't update avail->idx until they
394 * do sync). */ 398 * do sync). */
@@ -461,7 +465,8 @@ int virtqueue_add_sgs(struct virtqueue *_vq,
461 for (sg = sgs[i]; sg; sg = sg_next(sg)) 465 for (sg = sgs[i]; sg; sg = sg_next(sg))
462 total_sg++; 466 total_sg++;
463 } 467 }
464 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); 468 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
469 data, NULL, gfp);
465} 470}
466EXPORT_SYMBOL_GPL(virtqueue_add_sgs); 471EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
467 472
@@ -483,7 +488,7 @@ int virtqueue_add_outbuf(struct virtqueue *vq,
483 void *data, 488 void *data,
484 gfp_t gfp) 489 gfp_t gfp)
485{ 490{
486 return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); 491 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
487} 492}
488EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); 493EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
489 494
@@ -505,11 +510,35 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
505 void *data, 510 void *data,
506 gfp_t gfp) 511 gfp_t gfp)
507{ 512{
508 return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); 513 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
509} 514}
510EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); 515EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
511 516
512/** 517/**
518 * virtqueue_add_inbuf_ctx - expose input buffers to other end
519 * @vq: the struct virtqueue we're talking about.
520 * @sg: scatterlist (must be well-formed and terminated!)
521 * @num: the number of entries in @sg writable by other side
522 * @data: the token identifying the buffer.
523 * @ctx: extra context for the token
524 * @gfp: how to do memory allocations (if necessary).
525 *
526 * Caller must ensure we don't call this with other virtqueue operations
527 * at the same time (except where noted).
528 *
529 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
530 */
531int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
532 struct scatterlist *sg, unsigned int num,
533 void *data,
534 void *ctx,
535 gfp_t gfp)
536{
537 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
538}
539EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
540
541/**
513 * virtqueue_kick_prepare - first half of split virtqueue_kick call. 542 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
514 * @vq: the struct virtqueue 543 * @vq: the struct virtqueue
515 * 544 *
@@ -598,7 +627,8 @@ bool virtqueue_kick(struct virtqueue *vq)
598} 627}
599EXPORT_SYMBOL_GPL(virtqueue_kick); 628EXPORT_SYMBOL_GPL(virtqueue_kick);
600 629
601static void detach_buf(struct vring_virtqueue *vq, unsigned int head) 630static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
631 void **ctx)
602{ 632{
603 unsigned int i, j; 633 unsigned int i, j;
604 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); 634 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
@@ -622,10 +652,15 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
622 /* Plus final descriptor */ 652 /* Plus final descriptor */
623 vq->vq.num_free++; 653 vq->vq.num_free++;
624 654
625 /* Free the indirect table, if any, now that it's unmapped. */ 655 if (vq->indirect) {
626 if (vq->desc_state[head].indir_desc) {
627 struct vring_desc *indir_desc = vq->desc_state[head].indir_desc; 656 struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
628 u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len); 657 u32 len;
658
659 /* Free the indirect table, if any, now that it's unmapped. */
660 if (!indir_desc)
661 return;
662
663 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);
629 664
630 BUG_ON(!(vq->vring.desc[head].flags & 665 BUG_ON(!(vq->vring.desc[head].flags &
631 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))); 666 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
@@ -634,8 +669,10 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
634 for (j = 0; j < len / sizeof(struct vring_desc); j++) 669 for (j = 0; j < len / sizeof(struct vring_desc); j++)
635 vring_unmap_one(vq, &indir_desc[j]); 670 vring_unmap_one(vq, &indir_desc[j]);
636 671
637 kfree(vq->desc_state[head].indir_desc); 672 kfree(indir_desc);
638 vq->desc_state[head].indir_desc = NULL; 673 vq->desc_state[head].indir_desc = NULL;
674 } else if (ctx) {
675 *ctx = vq->desc_state[head].indir_desc;
639 } 676 }
640} 677}
641 678
@@ -660,7 +697,8 @@ static inline bool more_used(const struct vring_virtqueue *vq)
660 * Returns NULL if there are no used buffers, or the "data" token 697 * Returns NULL if there are no used buffers, or the "data" token
661 * handed to virtqueue_add_*(). 698 * handed to virtqueue_add_*().
662 */ 699 */
663void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) 700void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
701 void **ctx)
664{ 702{
665 struct vring_virtqueue *vq = to_vvq(_vq); 703 struct vring_virtqueue *vq = to_vvq(_vq);
666 void *ret; 704 void *ret;
@@ -698,7 +736,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
698 736
699 /* detach_buf clears data, so grab it now. */ 737 /* detach_buf clears data, so grab it now. */
700 ret = vq->desc_state[i].data; 738 ret = vq->desc_state[i].data;
701 detach_buf(vq, i); 739 detach_buf(vq, i, ctx);
702 vq->last_used_idx++; 740 vq->last_used_idx++;
703 /* If we expect an interrupt for the next entry, tell host 741 /* If we expect an interrupt for the next entry, tell host
704 * by writing event index and flush out the write before 742 * by writing event index and flush out the write before
@@ -715,8 +753,13 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
715 END_USE(vq); 753 END_USE(vq);
716 return ret; 754 return ret;
717} 755}
718EXPORT_SYMBOL_GPL(virtqueue_get_buf); 756EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
719 757
758void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
759{
760 return virtqueue_get_buf_ctx(_vq, len, NULL);
761}
762EXPORT_SYMBOL_GPL(virtqueue_get_buf);
720/** 763/**
721 * virtqueue_disable_cb - disable callbacks 764 * virtqueue_disable_cb - disable callbacks
722 * @vq: the struct virtqueue we're talking about. 765 * @vq: the struct virtqueue we're talking about.
@@ -878,7 +921,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
878 continue; 921 continue;
879 /* detach_buf clears data, so grab it now. */ 922 /* detach_buf clears data, so grab it now. */
880 buf = vq->desc_state[i].data; 923 buf = vq->desc_state[i].data;
881 detach_buf(vq, i); 924 detach_buf(vq, i, NULL);
882 vq->avail_idx_shadow--; 925 vq->avail_idx_shadow--;
883 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow); 926 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
884 END_USE(vq); 927 END_USE(vq);
@@ -951,7 +994,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
951 vq->last_add_time_valid = false; 994 vq->last_add_time_valid = false;
952#endif 995#endif
953 996
954 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); 997 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
998 !context;
955 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 999 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
956 1000
957 /* No callback? Tell other side not to bother us. */ 1001 /* No callback? Tell other side not to bother us. */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 7edfbdb55a99..ed04753278d4 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -44,6 +44,12 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
44 void *data, 44 void *data,
45 gfp_t gfp); 45 gfp_t gfp);
46 46
47int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
48 struct scatterlist sg[], unsigned int num,
49 void *data,
50 void *ctx,
51 gfp_t gfp);
52
47int virtqueue_add_sgs(struct virtqueue *vq, 53int virtqueue_add_sgs(struct virtqueue *vq,
48 struct scatterlist *sgs[], 54 struct scatterlist *sgs[],
49 unsigned int out_sgs, 55 unsigned int out_sgs,
@@ -59,6 +65,9 @@ bool virtqueue_notify(struct virtqueue *vq);
59 65
60void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); 66void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
61 67
68void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
69 void **ctx);
70
62void virtqueue_disable_cb(struct virtqueue *vq); 71void virtqueue_disable_cb(struct virtqueue *vq);
63 72
64bool virtqueue_enable_cb(struct virtqueue *vq); 73bool virtqueue_enable_cb(struct virtqueue *vq);