author		Michael S. Tsirkin <mst@redhat.com>	2014-10-22 08:42:09 -0400
committer	Michael S. Tsirkin <mst@redhat.com>	2014-12-09 05:05:25 -0500
commit		00e6f3d9d9e356dbf08369ffc4576f79438d51ea (patch)
tree		91fbb68059950b8c990ccaeea84ef9cfc0a8b718
parent		eef960a04354d13426c43a4e3750a5e2b383040c (diff)
virtio_ring: switch to new memory access APIs
Use virtioXX_to_cpu and friends for access to
all multibyte structures in memory.

Note: this is intentionally mechanical.
A follow-up patch will split long lines etc.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
-rw-r--r--	drivers/virtio/virtio_ring.c	89
1 file changed, 45 insertions(+), 44 deletions(-)
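For context: the accessors this patch switches to pick the ring's byte order per device. Legacy virtio devices use guest-native endianness, while devices offering VIRTIO_F_VERSION_1 are always little-endian. Below is a minimal sketch of the 16-bit pair, modeled on the helpers introduced earlier in this series (include/linux/virtio_byteorder.h and include/linux/virtio_config.h); the 32- and 64-bit variants follow the same pattern.

	/* Sketch of the accessors, assuming the definitions from this series.
	 * __virtio16 is a sparse __bitwise__ type: it cannot take part in
	 * arithmetic directly, so every field access converts explicitly. */
	static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
	{
		if (little_endian)
			return le16_to_cpu((__force __le16)val);
		else
			return be16_to_cpu((__force __be16)val);
	}

	static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
	{
		if (little_endian)
			return (__force __virtio16)cpu_to_le16(val);
		else
			return (__force __virtio16)cpu_to_be16(val);
	}

	static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
	{
		return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
	}

	static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
	{
		return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
	}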
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 839247cd8263..0d3c73737652 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -99,7 +99,8 @@ struct vring_virtqueue
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 
-static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp)
+static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
+					 unsigned int total_sg, gfp_t gfp)
 {
 	struct vring_desc *desc;
 	unsigned int i;
@@ -116,7 +117,7 @@ static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp)
 		return NULL;
 
 	for (i = 0; i < total_sg; i++)
-		desc[i].next = i+1;
+		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
 	return desc;
 }
 
@@ -165,17 +166,17 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 	/* If the host supports indirect descriptor tables, and we have multiple
 	 * buffers, then go indirect. FIXME: tune this threshold */
 	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
-		desc = alloc_indirect(total_sg, gfp);
+		desc = alloc_indirect(_vq, total_sg, gfp);
 	else
 		desc = NULL;
 
 	if (desc) {
 		/* Use a single buffer which doesn't continue */
-		vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
-		vq->vring.desc[head].addr = virt_to_phys(desc);
+		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
+		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
 		/* avoid kmemleak false positive (hidden by virt_to_phys) */
 		kmemleak_ignore(desc);
-		vq->vring.desc[head].len = total_sg * sizeof(struct vring_desc);
+		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
 
 		/* Set up rest to use this indirect table. */
 		i = 0;
@@ -205,28 +206,28 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 
 	for (n = 0; n < out_sgs; n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			desc[i].flags = VRING_DESC_F_NEXT;
-			desc[i].addr = sg_phys(sg);
-			desc[i].len = sg->length;
+			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
+			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
 			prev = i;
-			i = desc[i].next;
+			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
 		}
 	}
 	for (; n < (out_sgs + in_sgs); n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
-			desc[i].addr = sg_phys(sg);
-			desc[i].len = sg->length;
+			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
+			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
 			prev = i;
-			i = desc[i].next;
+			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
 		}
 	}
 	/* Last one doesn't continue. */
-	desc[prev].flags &= ~VRING_DESC_F_NEXT;
+	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
 
 	/* Update free pointer */
 	if (indirect)
-		vq->free_head = vq->vring.desc[head].next;
+		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
 	else
 		vq->free_head = i;
 
@@ -235,13 +236,13 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 
 	/* Put entry in available array (but don't update avail->idx until they
 	 * do sync). */
-	avail = (vq->vring.avail->idx & (vq->vring.num-1));
-	vq->vring.avail->ring[avail] = head;
+	avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
+	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
 
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
 	virtio_wmb(vq->weak_barriers);
-	vq->vring.avail->idx++;
+	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
 	vq->num_added++;
 
 	/* This is very unlikely, but theoretically possible.  Kick
@@ -354,8 +355,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
 	 * event. */
 	virtio_mb(vq->weak_barriers);
 
-	old = vq->vring.avail->idx - vq->num_added;
-	new = vq->vring.avail->idx;
+	old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
+	new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
 	vq->num_added = 0;
 
 #ifdef DEBUG
@@ -367,10 +368,10 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
 #endif
 
 	if (vq->event) {
-		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
+		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
 					      new, old);
 	} else {
-		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
+		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
 	}
 	END_USE(vq);
 	return needs_kick;
@@ -432,15 +433,15 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 	i = head;
 
 	/* Free the indirect table */
-	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
-		kfree(phys_to_virt(vq->vring.desc[i].addr));
+	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
+		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));
 
-	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
-		i = vq->vring.desc[i].next;
+	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
+		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
 		vq->vq.num_free++;
 	}
 
-	vq->vring.desc[i].next = vq->free_head;
+	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
 	vq->free_head = head;
 	/* Plus final descriptor */
 	vq->vq.num_free++;
@@ -448,7 +449,7 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 
 static inline bool more_used(const struct vring_virtqueue *vq)
 {
-	return vq->last_used_idx != vq->vring.used->idx;
+	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
 }
 
 /**
@@ -491,8 +492,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	virtio_rmb(vq->weak_barriers);
 
 	last_used = (vq->last_used_idx & (vq->vring.num - 1));
-	i = vq->vring.used->ring[last_used].id;
-	*len = vq->vring.used->ring[last_used].len;
+	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
+	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);
 
 	if (unlikely(i >= vq->vring.num)) {
 		BAD_RING(vq, "id %u out of range\n", i);
@@ -510,8 +511,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	/* If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
 	 * the read in the next get_buf call. */
-	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
-		vring_used_event(&vq->vring) = vq->last_used_idx;
+	if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
+		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
 		virtio_mb(vq->weak_barriers);
 	}
 
@@ -537,7 +538,7 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+	vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
 }
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
@@ -565,8 +566,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
 	END_USE(vq);
 	return last_used_idx;
 }
@@ -586,7 +587,7 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	virtio_mb(vq->weak_barriers);
-	return (u16)last_used_idx != vq->vring.used->idx;
+	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_poll);
 
@@ -633,12 +634,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
 	/* TODO: tune this threshold */
-	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
-	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
+	bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
+	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
 	virtio_mb(vq->weak_barriers);
-	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
+	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;
 	}
@@ -670,7 +671,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 		/* detach_buf clears data, so grab it now. */
 		buf = vq->data[i];
 		detach_buf(vq, i);
-		vq->vring.avail->idx--;
+		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
 		END_USE(vq);
 		return buf;
 	}
@@ -747,12 +748,12 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 
 	/* No callback?  Tell other side not to bother us. */
 	if (!callback)
-		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
 
 	/* Put everything in free lists. */
 	vq->free_head = 0;
 	for (i = 0; i < num-1; i++) {
-		vq->vring.desc[i].next = i+1;
+		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
 		vq->data[i] = NULL;
 	}
 	vq->data[i] = NULL;
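A note on the read-modify-write churn around avail->idx above: the ring fields become sparse __bitwise__ types in this series, so plain ++, |=, and comparisons against host-endian constants no longer type-check cleanly; every access has to round-trip through the converters. Roughly, per the types introduced by this series in include/uapi/linux/virtio_types.h:

	/* Sketch, assuming the uapi definitions from this series. */
	typedef __u16 __bitwise__ __virtio16;
	typedef __u32 __bitwise__ __virtio32;
	typedef __u64 __bitwise__ __virtio64;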