Diffstat (limited to 'drivers/hv/ring_buffer.c')
 -rw-r--r--   drivers/hv/ring_buffer.c   95
 1 file changed, 12 insertions(+), 83 deletions(-)
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index a40a73a7b71d..fe586bf74e17 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -33,25 +33,21 @@
 void hv_begin_read(struct hv_ring_buffer_info *rbi)
 {
 	rbi->ring_buffer->interrupt_mask = 1;
-	mb();
+	virt_mb();
 }
 
 u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 {
-	u32 read;
-	u32 write;
 
 	rbi->ring_buffer->interrupt_mask = 0;
-	mb();
+	virt_mb();
 
 	/*
 	 * Now check to see if the ring buffer is still empty.
 	 * If it is not, we raced and we need to process new
 	 * incoming messages.
 	 */
-	hv_get_ringbuffer_availbytes(rbi, &read, &write);
-
-	return read;
+	return hv_get_bytes_to_read(rbi);
 }
 
 /*
@@ -72,69 +68,17 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 
 static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
 {
-	mb();
-	if (rbi->ring_buffer->interrupt_mask)
+	virt_mb();
+	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
 		return false;
 
 	/* check interrupt_mask before read_index */
-	rmb();
+	virt_rmb();
 	/*
 	 * This is the only case we need to signal when the
 	 * ring transitions from being empty to non-empty.
 	 */
-	if (old_write == rbi->ring_buffer->read_index)
-		return true;
-
-	return false;
-}
-
-/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- *    producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- *    the consumer of the ring will signal when the ring
- *    state transitions from being full to a state where
- *    there is room for the producer to send the pending packet.
- */
-
-static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
-{
-	u32 cur_write_sz;
-	u32 r_size;
-	u32 write_loc;
-	u32 read_loc = rbi->ring_buffer->read_index;
-	u32 pending_sz;
-
-	/*
-	 * Issue a full memory barrier before making the signaling decision.
-	 * Here is the reason for having this barrier:
-	 * If the reading of the pend_sz (in this function)
-	 * were to be reordered and read before we commit the new read
-	 * index (in the calling function) we could
-	 * have a problem. If the host were to set the pending_sz after we
-	 * have sampled pending_sz and go to sleep before we commit the
-	 * read index, we could miss sending the interrupt. Issue a full
-	 * memory barrier to address this.
-	 */
-	mb();
-
-	pending_sz = rbi->ring_buffer->pending_send_sz;
-	write_loc = rbi->ring_buffer->write_index;
-	/* If the other end is not blocked on write don't bother. */
-	if (pending_sz == 0)
-		return false;
-
-	r_size = rbi->ring_datasize;
-	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
-		read_loc - write_loc;
-
-	if (cur_write_sz >= pending_sz)
+	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
 		return true;
 
 	return false;
@@ -188,17 +132,9 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
 			  u32 next_read_location)
 {
 	ring_info->ring_buffer->read_index = next_read_location;
+	ring_info->priv_read_index = next_read_location;
 }
 
-
-/* Get the start of the ring buffer. */
-static inline void *
-hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
-{
-	return (void *)ring_info->ring_buffer->buffer;
-}
-
-
 /* Get the size of the ring buffer. */
 static inline u32
 hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
@@ -332,7 +268,6 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
-	u32 bytes_avail_toread;
 	u32 totalbytes_towrite = 0;
 
 	u32 next_write_location;
@@ -348,9 +283,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	if (lock)
 		spin_lock_irqsave(&outring_info->ring_lock, flags);
 
-	hv_get_ringbuffer_availbytes(outring_info,
-				     &bytes_avail_toread,
-				     &bytes_avail_towrite);
+	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
 
 	/*
 	 * If there is only room for the packet, assume it is full.
@@ -384,7 +317,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 					     sizeof(u64));
 
 	/* Issue a full memory barrier before updating the write index */
-	mb();
+	virt_mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
@@ -401,7 +334,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
 		       u64 *requestid, bool *signal, bool raw)
 {
-	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
@@ -417,10 +349,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	*buffer_actual_len = 0;
 	*requestid = 0;
 
-	hv_get_ringbuffer_availbytes(inring_info,
-				     &bytes_avail_toread,
-				     &bytes_avail_towrite);
-
+	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
 	/* Make sure there is something to read */
 	if (bytes_avail_toread < sizeof(desc)) {
 		/*
@@ -464,7 +393,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	 * the writer may start writing to the read area once the read index
 	 * is updated.
 	 */
-	mb();
+	virt_mb();
 
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
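
The hunks above replace open-coded hv_get_ringbuffer_availbytes() calls with hv_get_bytes_to_read() and hv_get_bytes_to_write(), which are defined outside this file. As a rough orientation only, the following sketch shows what those helpers are assumed to compute, mirroring the index arithmetic that the removed hv_need_to_signal_on_read() performed inline; the example_* names are placeholders for illustration, not the real declarations.

/*
 * Assumed behaviour of hv_get_bytes_to_read()/hv_get_bytes_to_write():
 * bytes queued between read_index and write_index, and the remaining
 * free space, for a ring of ring_datasize bytes.
 */
static inline u32 example_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 dsize = rbi->ring_datasize;

	return write_loc >= read_loc ? (write_loc - read_loc) :
				       (dsize - read_loc) + write_loc;
}

static inline u32 example_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	/* Whatever is not waiting to be read is free for the producer. */
	return rbi->ring_datasize - example_bytes_to_read(rbi);
}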
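
The interrupt-mask handling in hv_begin_read()/hv_end_read() only avoids lost wakeups if the consumer re-checks the ring after unmasking, as the comment in hv_end_read() notes. A minimal, hypothetical consumer loop illustrating that pattern (the surrounding function is invented for illustration; only the two helpers are from this file):

static void example_drain_ring(struct hv_ring_buffer_info *rbi)
{
	hv_begin_read(rbi);		/* mask host signaling while draining */

	do {
		/* ... read and process every packet currently queued ... */
	} while (hv_end_read(rbi));	/* unmask; loop again if a write raced in */
}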