author	Rusty Russell <rusty@rustcorp.com.au>	2013-03-17 22:52:19 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2013-03-19 23:30:41 -0400
commit	a9a0fef779074838230e04a322fd2bdc921f4f4f (patch)
tree	a7d25b5002e84ac3df54788ec2620ccc1f7c2c21 /drivers/virtio
parent	73640c991e2f2804939af70567b23e4c54b7c266 (diff)
virtio_ring: expose virtio barriers for use in vringh.

The host side of the ring needs this logic too.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
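For reference, the shape the call sites below imply: each barrier now takes the weak_barriers flag directly instead of a struct vring_virtqueue pointer, so host-side code that has no such struct can reuse it. A minimal sketch of the relocated helper, assuming it moves to a header shared with vringh and becomes a static inline (the exact header and inline-vs-macro form are assumptions, not quoted from this commit):

/* Sketch: barrier parameterized on weak_barriers so both the guest
 * ring and host-side vringh code can call it. */
static inline void virtio_mb(bool weak_barriers)
{
#ifdef CONFIG_SMP
	if (weak_barriers) {
		/* Lightweight SMP barrier suffices: virtio-pci does not
		 * use relaxed MMIO windows, so no mandatory barrier. */
		smp_mb();
		return;
	}
#endif
	/* Otherwise force ordering: even a UP guest talks to a "device"
	 * that may run on another host CPU, and without CONFIG_SMP the
	 * smp_*() barriers degrade to barrier(). */
	mb();
}

/* virtio_rmb() and virtio_wmb() would follow the same pattern with
 * smp_rmb()/rmb() and smp_wmb()/wmb(). */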
Diffstat (limited to 'drivers/virtio')
-rw-r--r--	drivers/virtio/virtio_ring.c	33
1 file changed, 6 insertions(+), 27 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ffd7e7da5d3b..245177c286ae 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -24,27 +24,6 @@
 #include <linux/module.h>
 #include <linux/hrtimer.h>
 
-/* virtio guest is communicating with a virtual "device" that actually runs on
- * a host processor. Memory barriers are used to control SMP effects. */
-#ifdef CONFIG_SMP
-/* Where possible, use SMP barriers which are more lightweight than mandatory
- * barriers, because mandatory barriers control MMIO effects on accesses
- * through relaxed memory I/O windows (which virtio-pci does not use). */
-#define virtio_mb(vq) \
-	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
-#define virtio_rmb(vq) \
-	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
-#define virtio_wmb(vq) \
-	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
-#else
-/* We must force memory ordering even if guest is UP since host could be
- * running on another CPU, but SMP barriers are defined to barrier() in that
- * configuration. So fall back to mandatory barriers instead. */
-#define virtio_mb(vq) mb()
-#define virtio_rmb(vq) rmb()
-#define virtio_wmb(vq) wmb()
-#endif
-
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define BAD_RING(_vq, fmt, args...) \
@@ -276,7 +255,7 @@ add_head:
 
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
-	virtio_wmb(vq);
+	virtio_wmb(vq->weak_barriers);
 	vq->vring.avail->idx++;
 	vq->num_added++;
 
@@ -312,7 +291,7 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
 	START_USE(vq);
 	/* We need to expose available array entries before checking avail
 	 * event. */
-	virtio_mb(vq);
+	virtio_mb(vq->weak_barriers);
 
 	old = vq->vring.avail->idx - vq->num_added;
 	new = vq->vring.avail->idx;
@@ -436,7 +415,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	}
 
 	/* Only get used array entries after they have been exposed by host. */
-	virtio_rmb(vq);
+	virtio_rmb(vq->weak_barriers);
 
 	last_used = (vq->last_used_idx & (vq->vring.num - 1));
 	i = vq->vring.used->ring[last_used].id;
@@ -460,7 +439,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	 * the read in the next get_buf call. */
 	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
 		vring_used_event(&vq->vring) = vq->last_used_idx;
-		virtio_mb(vq);
+		virtio_mb(vq->weak_barriers);
 	}
 
 #ifdef DEBUG
@@ -513,7 +492,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 	vring_used_event(&vq->vring) = vq->last_used_idx;
-	virtio_mb(vq);
+	virtio_mb(vq->weak_barriers);
 	if (unlikely(more_used(vq))) {
 		END_USE(vq);
 		return false;
@@ -553,7 +532,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	/* TODO: tune this threshold */
 	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
 	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
-	virtio_mb(vq);
+	virtio_mb(vq->weak_barriers);
 	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;
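
A hedged sketch of the host-side consumer this change enables: a vringh-style reader must order its read of avail->idx before reading the entries that index exposes, mirroring the virtio_rmb() in virtqueue_get_buf() above. The function and names here (host_next_avail, last_avail_idx) are illustrative, not the vringh API:

#include <linux/errno.h>
#include <linux/virtio_ring.h>

/* Illustrative host-side fetch of the next available descriptor,
 * reusing the now-exposed virtio_rmb(). */
static int host_next_avail(const struct vring *vring, bool weak_barriers,
			   u16 *last_avail_idx, struct vring_desc *desc)
{
	u16 head;

	if (*last_avail_idx == vring->avail->idx)
		return -EAGAIN;		/* guest has published nothing new */

	/* Only read ring entries after reading the index exposing them. */
	virtio_rmb(weak_barriers);

	head = vring->avail->ring[(*last_avail_idx)++ & (vring->num - 1)];
	*desc = vring->desc[head];
	return head;
}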