author		Rusty Russell <rusty@rustcorp.com.au>	2013-03-17 22:52:19 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2013-03-19 23:30:41 -0400
commit		a9a0fef779074838230e04a322fd2bdc921f4f4f (patch)
tree		a7d25b5002e84ac3df54788ec2620ccc1f7c2c21
parent		73640c991e2f2804939af70567b23e4c54b7c266 (diff)
virtio_ring: expose virtio barriers for use in vringh.
The host side of the ring needs this logic too.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
 drivers/virtio/virtio_ring.c | 33 +++----------------
 include/linux/virtio_ring.h  | 57 ++++++++++++++++++++
 2 files changed, 63 insertions(+), 27 deletions(-)
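What "expose" means in practice: the guest-only macros in virtio_ring.c become inline functions in <linux/virtio_ring.h> that take the weak_barriers flag directly, so host-side ring code can use the same ordering logic without a struct vring_virtqueue. A minimal sketch of the kind of host-side user this enables (the function and its names are hypothetical; the real vringh helpers land in a later patch):

#include <linux/virtio_ring.h>

/* Hypothetical host-side publish of a used-ring entry.  This is the
 * mirror image of the guest's virtio_wmb() in virtqueue_add_buf():
 * the entry must be visible before the index that exposes it. */
static void host_publish_used(struct vring *vring, bool weak_barriers,
			      u32 head, u32 len)
{
	u16 idx = vring->used->idx;

	vring->used->ring[idx & (vring->num - 1)].id = head;
	vring->used->ring[idx & (vring->num - 1)].len = len;

	virtio_wmb(weak_barriers);	/* entry before index */
	vring->used->idx = idx + 1;
}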
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ffd7e7da5d3b..245177c286ae 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -24,27 +24,6 @@
 #include <linux/module.h>
 #include <linux/hrtimer.h>
 
-/* virtio guest is communicating with a virtual "device" that actually runs on
- * a host processor. Memory barriers are used to control SMP effects. */
-#ifdef CONFIG_SMP
-/* Where possible, use SMP barriers which are more lightweight than mandatory
- * barriers, because mandatory barriers control MMIO effects on accesses
- * through relaxed memory I/O windows (which virtio-pci does not use). */
-#define virtio_mb(vq) \
-	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
-#define virtio_rmb(vq) \
-	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
-#define virtio_wmb(vq) \
-	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
-#else
-/* We must force memory ordering even if guest is UP since host could be
- * running on another CPU, but SMP barriers are defined to barrier() in that
- * configuration. So fall back to mandatory barriers instead. */
-#define virtio_mb(vq) mb()
-#define virtio_rmb(vq) rmb()
-#define virtio_wmb(vq) wmb()
-#endif
-
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define BAD_RING(_vq, fmt, args...) \
@@ -276,7 +255,7 @@ add_head:
 
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
-	virtio_wmb(vq);
+	virtio_wmb(vq->weak_barriers);
 	vq->vring.avail->idx++;
 	vq->num_added++;
 
@@ -312,7 +291,7 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
 	START_USE(vq);
 	/* We need to expose available array entries before checking avail
 	 * event. */
-	virtio_mb(vq);
+	virtio_mb(vq->weak_barriers);
 
 	old = vq->vring.avail->idx - vq->num_added;
 	new = vq->vring.avail->idx;
@@ -436,7 +415,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	}
 
 	/* Only get used array entries after they have been exposed by host. */
-	virtio_rmb(vq);
+	virtio_rmb(vq->weak_barriers);
 
 	last_used = (vq->last_used_idx & (vq->vring.num - 1));
 	i = vq->vring.used->ring[last_used].id;
@@ -460,7 +439,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	 * the read in the next get_buf call. */
 	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
 		vring_used_event(&vq->vring) = vq->last_used_idx;
-		virtio_mb(vq);
+		virtio_mb(vq->weak_barriers);
 	}
 
 #ifdef DEBUG
@@ -513,7 +492,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 	vring_used_event(&vq->vring) = vq->last_used_idx;
-	virtio_mb(vq);
+	virtio_mb(vq->weak_barriers);
 	if (unlikely(more_used(vq))) {
 		END_USE(vq);
 		return false;
@@ -553,7 +532,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	/* TODO: tune this threshold */
 	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
 	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
-	virtio_mb(vq);
+	virtio_mb(vq->weak_barriers);
 	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;
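All six call-site changes above follow the same pattern: the barrier flavor is passed as a plain bool rather than being fished out of the vq by a macro. The bool itself is fixed when the transport creates the queue; roughly like this (argument list abbreviated from the vring_new_virtqueue() of this era, so treat the exact signature as an assumption):

/* A PCI transport shares cache-coherent memory with the host, so weak
 * (SMP) barriers suffice and it passes true; a transport driving a
 * heterogeneous remote processor (e.g. rpmsg/remoteproc) passes false
 * to get mandatory barriers. */
vq = vring_new_virtqueue(index, num, vring_align, vdev,
			 true /* weak_barriers */, pages,
			 notify, callback, name);

This is also why the new helpers take a bool instead of the vq: host-side code has no struct vring_virtqueue, only the bare ring.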
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 63c6ea199519..ca3ad41c2c82 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -4,6 +4,63 @@
 #include <linux/irqreturn.h>
 #include <uapi/linux/virtio_ring.h>
 
+/*
+ * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
+ * they're not on an SMP host system, so they need to assume real
+ * barriers. Non-SMP virtio hosts could skip the barriers, but does
+ * anyone care?
+ *
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ * For using virtio to talk to real devices (eg. other heterogeneous
+ * CPUs) we do need real barriers. In theory, we could be using both
+ * kinds of virtio, so it's a runtime decision, and the branch is
+ * actually quite cheap.
+ */
+
+#ifdef CONFIG_SMP
+static inline void virtio_mb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_mb();
+	else
+		mb();
+}
+
+static inline void virtio_rmb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_rmb();
+	else
+		rmb();
+}
+
+static inline void virtio_wmb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_wmb();
+	else
+		wmb();
+}
+#else
+static inline void virtio_mb(bool weak_barriers)
+{
+	mb();
+}
+
+static inline void virtio_rmb(bool weak_barriers)
+{
+	rmb();
+}
+
+static inline void virtio_wmb(bool weak_barriers)
+{
+	wmb();
+}
+#endif
+
 struct virtio_device;
 struct virtqueue;
 
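The new header comment is the design note that matters: which barrier is correct is a property of the transport, decided at runtime, and the same bool must drive both ends of the ring. The consume side pairs with the publish side like this; a hedged sketch of a host-side reader (names are illustrative, not the eventual vringh API):

/* Hypothetical host-side fetch of the next available head.  Pairs
 * with the guest's virtio_wmb() in virtqueue_add_buf(): we must read
 * the index before the ring entry it publishes. */
static inline int host_next_avail(const struct vring *vring,
				  bool weak_barriers, u16 *last_avail)
{
	if (vring->avail->idx == *last_avail)
		return -EAGAIN;		/* nothing new from the guest */

	virtio_rmb(weak_barriers);	/* index before entry */
	return vring->avail->ring[(*last_avail)++ & (vring->num - 1)];
}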