author		K. Y. Srinivasan <kys@microsoft.com>	2016-04-02 19:17:38 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-04-30 17:00:16 -0400
commit		a389fcfd2cb57793931a9fb98fed076aae50bb6c (patch)
tree		91238d1cc9d66a0bb118ec910f9be75cbe05f865 /drivers/hv
parent		bbca503b2ee000b5743a49c4995316cc499f44c6 (diff)
Drivers: hv: vmbus: Fix signaling logic in hv_need_to_signal_on_read()
On the consumer side, we have interrupt driven flow management of the producer. It is sufficient to base the signaling decision on the amount of space that is available to write after the read is complete. The current code samples the previously available space and uses this in making the signaling decision. This state can be stale and is unnecessary. Since the state can be stale, we can end up not signaling the host (when we should), and this can result in a hang. Fix this problem by removing the unnecessary check.

I would like to thank Arseney Romanenko <arseneyr@microsoft.com> for pointing out this issue.

Also, issue a full memory barrier before making the signaling decision to correctly deal with potential reordering of the write (read index) followed by the read of pending_sz.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Tested-by: Dexuan Cui <decui@microsoft.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
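For illustration, here is a minimal stand-alone C sketch of the consumer-side decision the patch below arrives at. The struct layout, the function name, and the use of atomic_thread_fence() in place of the kernel's mb() are simplified placeholders for this sketch, not the driver's actual definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdatomic.h>

/* Simplified stand-in for the Hyper-V ring buffer header fields used here. */
struct ring_hdr {
	uint32_t write_index;
	uint32_t read_index;
	uint32_t pending_send_sz;
};

/*
 * Sketch of the post-fix logic: the caller commits the new read index first,
 * then we take a full barrier, then sample pending_send_sz/write_index and
 * signal purely on the space available now, with no stale "previous write
 * size" check.
 */
static bool need_to_signal_on_read(const struct ring_hdr *rb, uint32_t ring_size)
{
	uint32_t pending_sz, write_loc, read_loc, cur_write_sz;

	/*
	 * Full barrier: keep the pending_send_sz read from being reordered
	 * before the read-index update performed by the caller.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	pending_sz = rb->pending_send_sz;
	write_loc = rb->write_index;
	read_loc = rb->read_index;

	/* If the producer is not blocked on write, don't bother. */
	if (pending_sz == 0)
		return false;

	cur_write_sz = write_loc >= read_loc ?
			ring_size - (write_loc - read_loc) :
			read_loc - write_loc;

	return cur_write_sz >= pending_sz;
}

The point, per the description above, is that the free-space computation uses only values sampled after the read index has been published, so a host that sets pending_send_sz just before blocking is not missed.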
Diffstat (limited to 'drivers/hv')
-rw-r--r--	drivers/hv/ring_buffer.c	26	++++++++++++++++++++------
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 5613e2b5cff7..a40a73a7b71d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
  * there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
-				      struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 {
 	u32 cur_write_sz;
 	u32 r_size;
-	u32 write_loc = rbi->ring_buffer->write_index;
+	u32 write_loc;
 	u32 read_loc = rbi->ring_buffer->read_index;
-	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+	u32 pending_sz;
 
+	/*
+	 * Issue a full memory barrier before making the signaling decision.
+	 * Here is the reason for having this barrier:
+	 * If the reading of the pend_sz (in this function)
+	 * were to be reordered and read before we commit the new read
+	 * index (in the calling function) we could
+	 * have a problem. If the host were to set the pending_sz after we
+	 * have sampled pending_sz and go to sleep before we commit the
+	 * read index, we could miss sending the interrupt. Issue a full
+	 * memory barrier to address this.
+	 */
+	mb();
+
+	pending_sz = rbi->ring_buffer->pending_send_sz;
+	write_loc = rbi->ring_buffer->write_index;
 	/* If the other end is not blocked on write don't bother. */
 	if (pending_sz == 0)
 		return false;
@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
 			read_loc - write_loc;
 
-	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+	if (cur_write_sz >= pending_sz)
 		return true;
 
 	return false;
@@ -455,7 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
 
-	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
+	*signal = hv_need_to_signal_on_read(inring_info);
 
 	return ret;
 }