author    Haiyang Zhang <haiyangz@microsoft.com>  2011-12-02 14:56:25 -0500
committer Greg Kroah-Hartman <gregkh@suse.de>    2011-12-09 19:26:50 -0500
commit    1d06825b0ede541f63b5577435abd2fc649a9b5e
tree      f3b72c945f340d1f6e81ec124a8fb352d9de747f
parent    9d41c5bb07ad97b7777283d7922292f456ba4bfd
net/hyperv: Fix the stop/wake queue mechanism
The ring buffer is only used to pass metadata for outbound packets; the
actual payload is accessed by DMA from the host. So a stop/wake queue
mechanism based on counting and comparing the number of pages sent
vs. the number of pages in the ring buffer is wrong.

Also, there is a race condition in the stop/wake queue calls, which can
stop the xmit queue forever.

The new stop/wake queue mechanism is based on the actual bytes used by
outbound packets in the ring buffer. Checking the number of outstanding
sends after stopping the queue prevents the race condition in which the
wake-queue call could otherwise happen before the stop-queue call.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reported-by: Long Li <longli@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
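As a minimal sketch of the pattern the message describes (illustrative
only, not the code added by this commit, whose additions land outside
this file-limited view): ring_write() and the outstanding counter below
are hypothetical stand-ins for the driver's ring-buffer write path and
its per-device count of uncompleted sends.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

/* Hypothetical stand-in for the ring-buffer write; assumed to return
 * -EAGAIN when the ring lacks room for the packet's actual bytes. */
extern int ring_write(void *pkt);

static atomic_t outstanding = ATOMIC_INIT(0);	/* posted, not completed */

static int example_send(struct net_device *net, void *pkt)
{
	int ret = ring_write(pkt);

	if (ret == 0) {
		atomic_inc(&outstanding);
	} else if (ret == -EAGAIN) {
		/* Ring full: stop the queue based on actual ring usage. */
		netif_stop_queue(net);
		/*
		 * Close the stop/wake race: if every outstanding send has
		 * already completed, no completion is left to wake the
		 * queue, so wake it ourselves and report a hard failure.
		 */
		if (atomic_read(&outstanding) < 1) {
			netif_wake_queue(net);
			ret = -ENOSPC;
		}
	}
	return ret;
}

The key ordering is stop first, then re-check the counter: a completion
that runs between the failed write and netif_stop_queue() would
otherwise issue its wake before the stop, leaving the queue stopped
forever.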
Diffstat (limited to 'drivers/net/hyperv/netvsc_drv.c')
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c  |  24
1 file changed, 1 insertion(+), 23 deletions(-)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index b69c3a4d1e9..7da85ebd7ac 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,15 +43,10 @@
 struct net_device_context {
 	/* point back to our device context */
 	struct hv_device *device_ctx;
-	atomic_t avail;
 	struct delayed_work dwork;
 };
 
 
-#define PACKET_PAGES_LOWATER  8
-/* Need this many pages to handle worst case fragmented packet */
-#define PACKET_PAGES_HIWATER  (MAX_SKB_FRAGS + 2)
-
 static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@@ -144,18 +139,8 @@ static void netvsc_xmit_completion(void *context)
 
 	kfree(packet);
 
-	if (skb) {
-		struct net_device *net = skb->dev;
-		struct net_device_context *net_device_ctx = netdev_priv(net);
-		unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
-
+	if (skb)
 		dev_kfree_skb_any(skb);
-
-		atomic_add(num_pages, &net_device_ctx->avail);
-		if (atomic_read(&net_device_ctx->avail) >=
-		    PACKET_PAGES_HIWATER)
-			netif_wake_queue(net);
-	}
 }
 
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -167,8 +152,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
 	/* Add 1 for skb->data and additional one for RNDIS */
 	num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
-	if (num_pages > atomic_read(&net_device_ctx->avail))
-		return NETDEV_TX_BUSY;
 
 	/* Allocate a netvsc packet based on # of frags. */
 	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -218,10 +201,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	if (ret == 0) {
 		net->stats.tx_bytes += skb->len;
 		net->stats.tx_packets++;
-
-		atomic_sub(num_pages, &net_device_ctx->avail);
-		if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
-			netif_stop_queue(net);
 	} else {
 		/* we are shutting down or bus overloaded, just drop packet */
 		net->stats.tx_dropped++;
@@ -391,7 +370,6 @@ static int netvsc_probe(struct hv_device *dev,
 
 	net_device_ctx = netdev_priv(net);
 	net_device_ctx->device_ctx = dev;
-	atomic_set(&net_device_ctx->avail, ring_size);
 	hv_set_drvdata(dev, net);
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
 
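For completeness, a matching sketch of the completion side under the
same assumptions as the example above (hypothetical names, not this
commit's code); the counter is decremented before the wake, so the
outstanding-sends re-check in the send path stays correct:

/* Completion-side counterpart to example_send() above. */
static void example_send_complete(struct net_device *net)
{
	atomic_dec(&outstanding);
	/* Ring space was just reclaimed; wake the queue if we stopped it. */
	if (netif_queue_stopped(net))
		netif_wake_queue(net);
}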