about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorK. Y. Srinivasan <kys@microsoft.com>2012-12-01 09:46:57 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2013-01-17 14:41:49 -0500
commitc2b8e5202cf7670f918d0f7439ed2123cd58e1b7 (patch)
tree5dfc99b2b71a348c3942522cb5d3c338684edf1a
parent2a5c43a821b3b26e6af1cdb987b4daeba6f13a6f (diff)
Drivers: hv: Implement flow management on the send side
Implement flow management on the send side. When the sender is blocked, the reader can potentially signal the sender to indicate there is now room to send. Signed-off-by: K. Y. Srinivasan <kys@microsoft.com> Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--drivers/hv/channel.c12
-rw-r--r--drivers/hv/hyperv_vmbus.h2
-rw-r--r--drivers/hv/ring_buffer.c51
3 files changed, 61 insertions, 4 deletions
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 9303252b2e19..064257e79f23 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -735,6 +735,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
735 u32 packetlen; 735 u32 packetlen;
736 u32 userlen; 736 u32 userlen;
737 int ret; 737 int ret;
738 bool signal = false;
738 739
739 *buffer_actual_len = 0; 740 *buffer_actual_len = 0;
740 *requestid = 0; 741 *requestid = 0;
@@ -761,8 +762,10 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
761 762
762 /* Copy over the packet to the user buffer */ 763 /* Copy over the packet to the user buffer */
763 ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen, 764 ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
764 (desc.offset8 << 3)); 765 (desc.offset8 << 3), &signal);
765 766
767 if (signal)
768 vmbus_setevent(channel);
766 769
767 return 0; 770 return 0;
768} 771}
@@ -779,6 +782,7 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
779 u32 packetlen; 782 u32 packetlen;
780 u32 userlen; 783 u32 userlen;
781 int ret; 784 int ret;
785 bool signal = false;
782 786
783 *buffer_actual_len = 0; 787 *buffer_actual_len = 0;
784 *requestid = 0; 788 *requestid = 0;
@@ -805,7 +809,11 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
805 *requestid = desc.trans_id; 809 *requestid = desc.trans_id;
806 810
807 /* Copy over the entire packet to the user buffer */ 811 /* Copy over the entire packet to the user buffer */
808 ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0); 812 ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
813 &signal);
814
815 if (signal)
816 vmbus_setevent(channel);
809 817
810 return 0; 818 return 0;
811} 819}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index becb106918d6..ac111f223821 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -550,7 +550,7 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
550int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info, 550int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
551 void *buffer, 551 void *buffer,
552 u32 buflen, 552 u32 buflen,
553 u32 offset); 553 u32 offset, bool *signal);
554 554
555 555
556void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 556void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 2a0babc95709..cafa72ffdc30 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -84,6 +84,50 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
84 return false; 84 return false;
85} 85}
86 86
87/*
88 * To optimize the flow management on the send-side,
 89 * when the sender is blocked because of lack of
 90 * sufficient space in the ring buffer, potentially the
 91 * consumer of the ring buffer can signal the producer.
92 * This is controlled by the following parameters:
93 *
94 * 1. pending_send_sz: This is the size in bytes that the
95 * producer is trying to send.
96 * 2. The feature bit feat_pending_send_sz set to indicate if
97 * the consumer of the ring will signal when the ring
98 * state transitions from being full to a state where
99 * there is room for the producer to send the pending packet.
100 */
101
102static bool hv_need_to_signal_on_read(u32 old_rd,
103 struct hv_ring_buffer_info *rbi)
104{
105 u32 prev_write_sz;
106 u32 cur_write_sz;
107 u32 r_size;
108 u32 write_loc = rbi->ring_buffer->write_index;
109 u32 read_loc = rbi->ring_buffer->read_index;
110 u32 pending_sz = rbi->ring_buffer->pending_send_sz;
111
 112 /*
 113 * If the other end is not blocked on write, don't bother.
 114 */
115 if (pending_sz == 0)
116 return false;
117
118 r_size = rbi->ring_datasize;
119 cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
120 read_loc - write_loc;
121
122 prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
123 old_rd - write_loc;
124
125
126 if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
127 return true;
128
129 return false;
130}
87 131
88/* 132/*
89 * hv_get_next_write_location() 133 * hv_get_next_write_location()
@@ -461,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
461 * 505 *
462 */ 506 */
463int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, 507int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
464 u32 buflen, u32 offset) 508 u32 buflen, u32 offset, bool *signal)
465{ 509{
466 u32 bytes_avail_towrite; 510 u32 bytes_avail_towrite;
467 u32 bytes_avail_toread; 511 u32 bytes_avail_toread;
468 u32 next_read_location = 0; 512 u32 next_read_location = 0;
469 u64 prev_indices = 0; 513 u64 prev_indices = 0;
470 unsigned long flags; 514 unsigned long flags;
515 u32 old_read;
471 516
472 if (buflen <= 0) 517 if (buflen <= 0)
473 return -EINVAL; 518 return -EINVAL;
@@ -478,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
478 &bytes_avail_toread, 523 &bytes_avail_toread,
479 &bytes_avail_towrite); 524 &bytes_avail_towrite);
480 525
526 old_read = bytes_avail_toread;
527
481 /* Make sure there is something to read */ 528 /* Make sure there is something to read */
482 if (bytes_avail_toread < buflen) { 529 if (bytes_avail_toread < buflen) {
483 spin_unlock_irqrestore(&inring_info->ring_lock, flags); 530 spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -508,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
508 555
509 spin_unlock_irqrestore(&inring_info->ring_lock, flags); 556 spin_unlock_irqrestore(&inring_info->ring_lock, flags);
510 557
558 *signal = hv_need_to_signal_on_read(old_read, inring_info);
559
511 return 0; 560 return 0;
512} 561}