aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/hv/ring_buffer.c
diff options
context:
space:
mode:
authorK. Y. Srinivasan <kys@microsoft.com>2012-12-01 09:46:57 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2013-01-17 14:41:49 -0500
commitc2b8e5202cf7670f918d0f7439ed2123cd58e1b7 (patch)
tree5dfc99b2b71a348c3942522cb5d3c338684edf1a /drivers/hv/ring_buffer.c
parent2a5c43a821b3b26e6af1cdb987b4daeba6f13a6f (diff)
Drivers: hv: Implement flow management on the send side
Implement flow management on the send side. When the sender is blocked, the reader can potentially signal the sender to indicate there is now room to send. Signed-off-by: K. Y. Srinivasan <kys@microsoft.com> Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/hv/ring_buffer.c')
-rw-r--r--drivers/hv/ring_buffer.c51
1 file changed, 50 insertions, 1 deletion
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 2a0babc95709..cafa72ffdc30 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -84,6 +84,50 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
84 return false; 84 return false;
85} 85}
86 86
87/*
88 * To optimize the flow management on the send-side,
89 * when the sender is blocked because of lack of
90 * sufficient space in the ring buffer, potential the
91 * consumer of the ring buffer can signal the producer.
92 * This is controlled by the following parameters:
93 *
94 * 1. pending_send_sz: This is the size in bytes that the
95 * producer is trying to send.
96 * 2. The feature bit feat_pending_send_sz set to indicate if
97 * the consumer of the ring will signal when the ring
98 * state transitions from being full to a state where
99 * there is room for the producer to send the pending packet.
100 */
101
102static bool hv_need_to_signal_on_read(u32 old_rd,
103 struct hv_ring_buffer_info *rbi)
104{
105 u32 prev_write_sz;
106 u32 cur_write_sz;
107 u32 r_size;
108 u32 write_loc = rbi->ring_buffer->write_index;
109 u32 read_loc = rbi->ring_buffer->read_index;
110 u32 pending_sz = rbi->ring_buffer->pending_send_sz;
111
112 /*
113 * If the other end is not blocked on write don't bother.
114 */
115 if (pending_sz == 0)
116 return false;
117
118 r_size = rbi->ring_datasize;
119 cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
120 read_loc - write_loc;
121
122 prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
123 old_rd - write_loc;
124
125
126 if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
127 return true;
128
129 return false;
130}
87 131
88/* 132/*
89 * hv_get_next_write_location() 133 * hv_get_next_write_location()
@@ -461,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
461 * 505 *
462 */ 506 */
463int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, 507int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
464 u32 buflen, u32 offset) 508 u32 buflen, u32 offset, bool *signal)
465{ 509{
466 u32 bytes_avail_towrite; 510 u32 bytes_avail_towrite;
467 u32 bytes_avail_toread; 511 u32 bytes_avail_toread;
468 u32 next_read_location = 0; 512 u32 next_read_location = 0;
469 u64 prev_indices = 0; 513 u64 prev_indices = 0;
470 unsigned long flags; 514 unsigned long flags;
515 u32 old_read;
471 516
472 if (buflen <= 0) 517 if (buflen <= 0)
473 return -EINVAL; 518 return -EINVAL;
@@ -478,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
478 &bytes_avail_toread, 523 &bytes_avail_toread,
479 &bytes_avail_towrite); 524 &bytes_avail_towrite);
480 525
526 old_read = bytes_avail_toread;
527
481 /* Make sure there is something to read */ 528 /* Make sure there is something to read */
482 if (bytes_avail_toread < buflen) { 529 if (bytes_avail_toread < buflen) {
483 spin_unlock_irqrestore(&inring_info->ring_lock, flags); 530 spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -508,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
508 555
509 spin_unlock_irqrestore(&inring_info->ring_lock, flags); 556 spin_unlock_irqrestore(&inring_info->ring_lock, flags);
510 557
558 *signal = hv_need_to_signal_on_read(old_read, inring_info);
559
511 return 0; 560 return 0;
512} 561}