Diffstat (limited to 'drivers/virtio/virtio_ring.c')
-rw-r--r--	drivers/virtio/virtio_ring.c	102
1 file changed, 93 insertions(+), 9 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c52369ab9bb..a882f2606515 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,21 +23,30 @@
 
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
-#define BAD_RING(_vq, fmt...)				\
-	do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0)
+#define BAD_RING(_vq, fmt, args...)				\
+	do {							\
+		dev_err(&(_vq)->vq.vdev->dev,			\
+			"%s:"fmt, (_vq)->vq.name, ##args);	\
+		BUG();						\
+	} while (0)
 /* Caller is supposed to guarantee no reentry. */
 #define START_USE(_vq)						\
 	do {							\
 		if ((_vq)->in_use)				\
-			panic("in_use = %i\n", (_vq)->in_use);	\
+			panic("%s:in_use = %i\n",		\
+			      (_vq)->vq.name, (_vq)->in_use);	\
 		(_vq)->in_use = __LINE__;			\
 		mb();						\
-	} while(0)
+	} while (0)
 #define END_USE(_vq) \
 	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
 #else
-#define BAD_RING(_vq, fmt...)				\
-	do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0)
+#define BAD_RING(_vq, fmt, args...)				\
+	do {							\
+		dev_err(&_vq->vq.vdev->dev,			\
+			"%s:"fmt, (_vq)->vq.name, ##args);	\
+		(_vq)->broken = true;				\
+	} while (0)
 #define START_USE(vq)
 #define END_USE(vq)
 #endif
@@ -52,6 +61,9 @@ struct vring_virtqueue
 	/* Other side has made a mess, don't try any more. */
 	bool broken;
 
+	/* Host supports indirect buffers */
+	bool indirect;
+
 	/* Number of free buffers */
 	unsigned int num_free;
 	/* Head of free buffer list. */
@@ -76,6 +88,55 @@ struct vring_virtqueue
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 
+/* Set up an indirect table of descriptors and add it to the queue. */
+static int vring_add_indirect(struct vring_virtqueue *vq,
+			      struct scatterlist sg[],
+			      unsigned int out,
+			      unsigned int in)
+{
+	struct vring_desc *desc;
+	unsigned head;
+	int i;
+
+	desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
+	if (!desc)
+		return vq->vring.num;
+
+	/* Transfer entries from the sg list into the indirect page */
+	for (i = 0; i < out; i++) {
+		desc[i].flags = VRING_DESC_F_NEXT;
+		desc[i].addr = sg_phys(sg);
+		desc[i].len = sg->length;
+		desc[i].next = i+1;
+		sg++;
+	}
+	for (; i < (out + in); i++) {
+		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
+		desc[i].addr = sg_phys(sg);
+		desc[i].len = sg->length;
+		desc[i].next = i+1;
+		sg++;
+	}
+
+	/* Last one doesn't continue. */
+	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
+	desc[i-1].next = 0;
+
+	/* We're about to use a buffer */
+	vq->num_free--;
+
+	/* Use a single buffer which doesn't continue */
+	head = vq->free_head;
+	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
+	vq->vring.desc[head].addr = virt_to_phys(desc);
+	vq->vring.desc[head].len = i * sizeof(struct vring_desc);
+
+	/* Update free pointer */
+	vq->free_head = vq->vring.desc[head].next;
+
+	return head;
+}
+
 static int vring_add_buf(struct virtqueue *_vq,
 			 struct scatterlist sg[],
 			 unsigned int out,
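
The new vring_add_indirect() above packs an entire scatterlist into one kmalloc()ed table and publishes it through a single VRING_DESC_F_INDIRECT entry, so a multi-segment request consumes one ring slot instead of out + in. A minimal caller-side sketch of what this enables (the buffers, lengths, and token are hypothetical; add_buf is the existing virtqueue op that now routes through vring_add_buf below):

	struct scatterlist sg[3];
	int err;

	/* Two driver-readable segments, one device-writable segment.
	 * With VIRTIO_RING_F_INDIRECT_DESC negotiated, all three land
	 * in one indirect table occupying a single ring descriptor. */
	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], &req_hdr, sizeof(req_hdr));		/* out */
	sg_set_buf(&sg[1], data_buf, data_len);			/* out */
	sg_set_buf(&sg[2], &req_status, sizeof(req_status));	/* in  */

	err = vq->vq_ops->add_buf(vq, sg, 2, 1, &req_token);
	if (err) {
		/* ring full, or the indirect table allocation failed
		 * and the direct path had no room either */
		return err;
	}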
@@ -85,12 +146,21 @@ static int vring_add_buf(struct virtqueue *_vq,
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	unsigned int i, avail, head, uninitialized_var(prev);
 
+	START_USE(vq);
+
 	BUG_ON(data == NULL);
+
+	/* If the host supports indirect descriptor tables, and we have multiple
+	 * buffers, then go indirect. FIXME: tune this threshold */
+	if (vq->indirect && (out + in) > 1 && vq->num_free) {
+		head = vring_add_indirect(vq, sg, out, in);
+		if (head != vq->vring.num)
+			goto add_head;
+	}
+
 	BUG_ON(out + in > vq->vring.num);
 	BUG_ON(out + in == 0);
 
-	START_USE(vq);
-
 	if (vq->num_free < out + in) {
 		pr_debug("Can't add buf len %i - avail = %i\n",
 			 out + in, vq->num_free);
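
The threshold in the new fast-path test is deliberately crude, as the FIXME admits. A worked slot count shows why (out + in) > 1 is the natural cutoff:

	/* Slot accounting for a request with out = 2, in = 1:
	 *
	 *   direct path:    3 ring descriptors chained via NEXT
	 *   indirect path:  1 ring descriptor + a 3-entry kmalloc()ed
	 *                   table (3 * 16 bytes)
	 *
	 * For out + in == 1 the indirect path would still burn one ring
	 * slot and add a kmalloc(), saving nothing, so single-buffer
	 * requests always stay direct.  On allocation failure,
	 * vring_add_indirect() returns vq->vring.num (never a valid
	 * head), and the code falls through to the direct path. */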
@@ -127,6 +197,7 @@ static int vring_add_buf(struct virtqueue *_vq,
 	/* Update free pointer */
 	vq->free_head = i;
 
+add_head:
 	/* Set token. */
 	vq->data[head] = data;
 
@@ -170,6 +241,11 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 
 	/* Put back on free list: find end */
 	i = head;
+
+	/* Free the indirect table */
+	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
+		kfree(phys_to_virt(vq->vring.desc[i].addr));
+
 	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
 		i = vq->vring.desc[i].next;
 		vq->num_free++;
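
The indirect table's lifetime is symmetric: vring_add_indirect() stores virt_to_phys(desc) in the head descriptor, and detach_buf() maps it back with phys_to_virt() and kfree()s it before walking the (now single-entry) chain. Both sides rely on the table being a plain array of the ring's own descriptor type from include/linux/virtio_ring.h, which is also what makes len = i * sizeof(struct vring_desc) the right table size:

	/* 16 bytes per entry; an indirect table is just an array of
	 * these living outside the ring proper. */
	struct vring_desc {
		__u64 addr;	/* guest-physical buffer address */
		__u32 len;	/* buffer length in bytes */
		__u16 flags;	/* NEXT / WRITE / INDIRECT */
		__u16 next;	/* chaining index within the table */
	};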
@@ -284,7 +360,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 				      struct virtio_device *vdev,
 				      void *pages,
 				      void (*notify)(struct virtqueue *),
-				      void (*callback)(struct virtqueue *))
+				      void (*callback)(struct virtqueue *),
+				      const char *name)
 {
 	struct vring_virtqueue *vq;
 	unsigned int i;
@@ -303,14 +380,18 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
 	vq->vq.vq_ops = &vring_vq_ops;
+	vq->vq.name = name;
 	vq->notify = notify;
 	vq->broken = false;
 	vq->last_used_idx = 0;
 	vq->num_added = 0;
+	list_add_tail(&vq->vq.list, &vdev->vqs);
 #ifdef DEBUG
 	vq->in_use = false;
 #endif
 
+	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+
 	/* No callback? Tell other side not to bother us. */
 	if (!callback)
 		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
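
Every transport calling vring_new_virtqueue() must be updated for the new name parameter, which the reworked BAD_RING() and START_USE() messages print. A hedged sketch of an updated call site (argument names follow this function's own signature; the queue name string and error label are illustrative):

	struct virtqueue *vq;

	vq = vring_new_virtqueue(num, vdev, pages,
				 notify, callback, "input");
	if (!vq)
		/* e.g. num was not a power of two */
		goto out_free_pages;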
@@ -327,6 +408,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue);
 
 void vring_del_virtqueue(struct virtqueue *vq)
 {
+	list_del(&vq->list);
 	kfree(to_vvq(vq));
 }
 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -338,6 +420,8 @@ void vring_transport_features(struct virtio_device *vdev)
 
 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
 		switch (i) {
+		case VIRTIO_RING_F_INDIRECT_DESC:
+			break;
 		default:
 			/* We don't understand this bit. */
 			clear_bit(i, vdev->features);
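
The new case arm whitelists VIRTIO_RING_F_INDIRECT_DESC (transport feature bit 28, from include/linux/virtio_ring.h) so it is no longer cleared as an unknown transport bit. Roughly, the negotiation then connects the two ends of this patch:

	/* Feature bit preserved by the new case arm above: */
	#define VIRTIO_RING_F_INDIRECT_DESC	28

	/* If the host offered it too, vring_new_virtqueue() sees the
	 * negotiated bit and enables the indirect fast path: */
	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);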