about summary refs log tree commit diff stats
path: root/drivers/virtio
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/virtio_ring.c | 26
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 1ee97d402a48..827f7e042610 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -21,6 +21,24 @@
 #include <linux/virtio_config.h>
 #include <linux/device.h>
 
+/* virtio guest is communicating with a virtual "device" that actually runs on
+ * a host processor. Memory barriers are used to control SMP effects. */
+#ifdef CONFIG_SMP
+/* Where possible, use SMP barriers which are more lightweight than mandatory
+ * barriers, because mandatory barriers control MMIO effects on accesses
+ * through relaxed memory I/O windows (which virtio does not use). */
+#define virtio_mb() smp_mb()
+#define virtio_rmb() smp_rmb()
+#define virtio_wmb() smp_wmb()
+#else
+/* We must force memory ordering even if guest is UP since host could be
+ * running on another CPU, but SMP barriers are defined to barrier() in that
+ * configuration. So fall back to mandatory barriers instead. */
+#define virtio_mb() mb()
+#define virtio_rmb() rmb()
+#define virtio_wmb() wmb()
+#endif
+
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define BAD_RING(_vq, fmt, args...) \
@@ -220,13 +238,13 @@ static void vring_kick(struct virtqueue *_vq)
 	START_USE(vq);
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
-	wmb();
+	virtio_wmb();
 
 	vq->vring.avail->idx += vq->num_added;
 	vq->num_added = 0;
 
 	/* Need to update avail index before checking if we should notify */
-	mb();
+	virtio_mb();
 
 	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
 		/* Prod other side to tell it about changes. */
@@ -285,7 +303,7 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
 	}
 
 	/* Only get used array entries after they have been exposed by host. */
-	rmb();
+	virtio_rmb();
 
 	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
 	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
@@ -323,7 +341,7 @@ static bool vring_enable_cb(struct virtqueue *_vq)
 	/* We optimistically turn back on interrupts, then check if there was
 	 * more to do. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	mb();
+	virtio_mb();
 	if (unlikely(more_used(vq))) {
 		END_USE(vq);
 		return false;