about summary refs log tree commit diff stats
path: root/drivers/virtio/virtio_ring.c
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2012-10-16 09:26:14 -0400
committerRusty Russell <rusty@rustcorp.com.au>2012-12-17 23:50:31 -0500
commit06ca287dbac9cc19d04ac2901b8c4882c03795ff (patch)
tree37574b40e4b18070fe92e88f722035159aa341fb /drivers/virtio/virtio_ring.c
parent1ce6853aa0f8e1cc3ae811a85d50cde6ad0ef735 (diff)
virtio: move queue_index and num_free fields into core struct virtqueue.
They're generic concepts, so hoist them. This also avoids accessor functions (though kept around for merge with DaveM's net tree). This goes even further than Jason Wang's 17bb6d4088 patch ("virtio-ring: move queue_index to vring_virtqueue") which moved the queue_index from the specific transport. Acked-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'drivers/virtio/virtio_ring.c')
-rw-r--r--drivers/virtio/virtio_ring.c34
1 file changed, 11 insertions, 23 deletions
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 286c30cb393..33a4ce009bc 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -93,8 +93,6 @@ struct vring_virtqueue
93 /* Host publishes avail event idx */ 93 /* Host publishes avail event idx */
94 bool event; 94 bool event;
95 95
96 /* Number of free buffers */
97 unsigned int num_free;
98 /* Head of free buffer list. */ 96 /* Head of free buffer list. */
99 unsigned int free_head; 97 unsigned int free_head;
100 /* Number we've added since last sync. */ 98 /* Number we've added since last sync. */
@@ -106,9 +104,6 @@ struct vring_virtqueue
106 /* How to notify other side. FIXME: commonalize hcalls! */ 104 /* How to notify other side. FIXME: commonalize hcalls! */
107 void (*notify)(struct virtqueue *vq); 105 void (*notify)(struct virtqueue *vq);
108 106
109 /* Index of the queue */
110 int queue_index;
111
112#ifdef DEBUG 107#ifdef DEBUG
113 /* They're supposed to lock for us. */ 108 /* They're supposed to lock for us. */
114 unsigned int in_use; 109 unsigned int in_use;
@@ -167,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
167 desc[i-1].next = 0; 162 desc[i-1].next = 0;
168 163
169 /* We're about to use a buffer */ 164 /* We're about to use a buffer */
170 vq->num_free--; 165 vq->vq.num_free--;
171 166
172 /* Use a single buffer which doesn't continue */ 167 /* Use a single buffer which doesn't continue */
173 head = vq->free_head; 168 head = vq->free_head;
@@ -181,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
181 return head; 176 return head;
182} 177}
183 178
184int virtqueue_get_queue_index(struct virtqueue *_vq)
185{
186 struct vring_virtqueue *vq = to_vvq(_vq);
187 return vq->queue_index;
188}
189EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
190
191/** 179/**
192 * virtqueue_add_buf - expose buffer to other end 180 * virtqueue_add_buf - expose buffer to other end
193 * @vq: the struct virtqueue we're talking about. 181 * @vq: the struct virtqueue we're talking about.
@@ -235,7 +223,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
235 223
236 /* If the host supports indirect descriptor tables, and we have multiple 224 /* If the host supports indirect descriptor tables, and we have multiple
237 * buffers, then go indirect. FIXME: tune this threshold */ 225 * buffers, then go indirect. FIXME: tune this threshold */
238 if (vq->indirect && (out + in) > 1 && vq->num_free) { 226 if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
239 head = vring_add_indirect(vq, sg, out, in, gfp); 227 head = vring_add_indirect(vq, sg, out, in, gfp);
240 if (likely(head >= 0)) 228 if (likely(head >= 0))
241 goto add_head; 229 goto add_head;
@@ -244,9 +232,9 @@ int virtqueue_add_buf(struct virtqueue *_vq,
244 BUG_ON(out + in > vq->vring.num); 232 BUG_ON(out + in > vq->vring.num);
245 BUG_ON(out + in == 0); 233 BUG_ON(out + in == 0);
246 234
247 if (vq->num_free < out + in) { 235 if (vq->vq.num_free < out + in) {
248 pr_debug("Can't add buf len %i - avail = %i\n", 236 pr_debug("Can't add buf len %i - avail = %i\n",
249 out + in, vq->num_free); 237 out + in, vq->vq.num_free);
250 /* FIXME: for historical reasons, we force a notify here if 238 /* FIXME: for historical reasons, we force a notify here if
251 * there are outgoing parts to the buffer. Presumably the 239 * there are outgoing parts to the buffer. Presumably the
252 * host should service the ring ASAP. */ 240 * host should service the ring ASAP. */
@@ -257,7 +245,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
257 } 245 }
258 246
259 /* We're about to use some buffers from the free list. */ 247 /* We're about to use some buffers from the free list. */
260 vq->num_free -= out + in; 248 vq->vq.num_free -= out + in;
261 249
262 head = vq->free_head; 250 head = vq->free_head;
263 for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) { 251 for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
@@ -303,7 +291,7 @@ add_head:
303 pr_debug("Added buffer head %i to %p\n", head, vq); 291 pr_debug("Added buffer head %i to %p\n", head, vq);
304 END_USE(vq); 292 END_USE(vq);
305 293
306 return vq->num_free; 294 return vq->vq.num_free;
307} 295}
308EXPORT_SYMBOL_GPL(virtqueue_add_buf); 296EXPORT_SYMBOL_GPL(virtqueue_add_buf);
309 297
@@ -400,13 +388,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
400 388
401 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { 389 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
402 i = vq->vring.desc[i].next; 390 i = vq->vring.desc[i].next;
403 vq->num_free++; 391 vq->vq.num_free++;
404 } 392 }
405 393
406 vq->vring.desc[i].next = vq->free_head; 394 vq->vring.desc[i].next = vq->free_head;
407 vq->free_head = head; 395 vq->free_head = head;
408 /* Plus final descriptor */ 396 /* Plus final descriptor */
409 vq->num_free++; 397 vq->vq.num_free++;
410} 398}
411 399
412static inline bool more_used(const struct vring_virtqueue *vq) 400static inline bool more_used(const struct vring_virtqueue *vq)
@@ -606,7 +594,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
606 return buf; 594 return buf;
607 } 595 }
608 /* That should have freed everything. */ 596 /* That should have freed everything. */
609 BUG_ON(vq->num_free != vq->vring.num); 597 BUG_ON(vq->vq.num_free != vq->vring.num);
610 598
611 END_USE(vq); 599 END_USE(vq);
612 return NULL; 600 return NULL;
@@ -660,12 +648,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
660 vq->vq.callback = callback; 648 vq->vq.callback = callback;
661 vq->vq.vdev = vdev; 649 vq->vq.vdev = vdev;
662 vq->vq.name = name; 650 vq->vq.name = name;
651 vq->vq.num_free = num;
652 vq->vq.index = index;
663 vq->notify = notify; 653 vq->notify = notify;
664 vq->weak_barriers = weak_barriers; 654 vq->weak_barriers = weak_barriers;
665 vq->broken = false; 655 vq->broken = false;
666 vq->last_used_idx = 0; 656 vq->last_used_idx = 0;
667 vq->num_added = 0; 657 vq->num_added = 0;
668 vq->queue_index = index;
669 list_add_tail(&vq->vq.list, &vdev->vqs); 658 list_add_tail(&vq->vq.list, &vdev->vqs);
670#ifdef DEBUG 659#ifdef DEBUG
671 vq->in_use = false; 660 vq->in_use = false;
@@ -680,7 +669,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
680 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; 669 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
681 670
682 /* Put everything in free lists. */ 671 /* Put everything in free lists. */
683 vq->num_free = num;
684 vq->free_head = 0; 672 vq->free_head = 0;
685 for (i = 0; i < num-1; i++) { 673 for (i = 0; i < num-1; i++) {
686 vq->vring.desc[i].next = i+1; 674 vq->vring.desc[i].next = i+1;