Diffstat (limited to 'drivers/net')
 drivers/net/hyperv/netvsc.c     | 14 +++++++++++---
 drivers/net/hyperv/netvsc_drv.c | 24 +-----------------------
 2 files changed, 12 insertions(+), 26 deletions(-)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4a807e44ec60..b6ac152a9bd0 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -435,6 +435,9 @@ static void netvsc_send_completion(struct hv_device *device,
 			nvsc_packet->completion.send.send_completion_ctx);

 		atomic_dec(&net_device->num_outstanding_sends);
+
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
 	} else {
 		netdev_err(ndev, "Unknown send completion packet type- "
 			   "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -485,11 +488,16 @@ int netvsc_send(struct hv_device *device,

 	}

-	if (ret != 0)
+	if (ret == 0) {
+		atomic_inc(&net_device->num_outstanding_sends);
+	} else if (ret == -EAGAIN) {
+		netif_stop_queue(ndev);
+		if (atomic_read(&net_device->num_outstanding_sends) < 1)
+			netif_wake_queue(ndev);
+	} else {
 		netdev_err(ndev, "Unable to send packet %p ret %d\n",
 			   packet, ret);
-	else
-		atomic_inc(&net_device->num_outstanding_sends);
+	}

 	return ret;
 }
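
Taken together, the netvsc.c hunks above move transmit flow control onto the host ring buffer's own backpressure: a send that fails with -EAGAIN stops the queue, and each send completion wakes it again, with num_outstanding_sends guarding against stopping a queue that no completion will ever wake. A minimal sketch of that protocol, assuming a stripped-down device struct (example_dev, example_post_send and example_send_done are illustrative stand-ins, not the driver's real names):

/* Illustrative sketch of the stop/wake protocol above; the struct and
 * function names are simplified stand-ins, not the driver's real code. */
struct example_dev {
	atomic_t num_outstanding_sends;
};

/* Transmit side, mirroring the netvsc_send() hunk. */
static int example_post_send(struct example_dev *nvdev,
			     struct net_device *ndev, int ret)
{
	if (ret == 0) {
		/* Host accepted the packet: one more send in flight. */
		atomic_inc(&nvdev->num_outstanding_sends);
	} else if (ret == -EAGAIN) {
		/* Ring buffer full: stop the queue and let the next
		 * completion wake it.  If nothing is in flight, no
		 * completion will ever arrive, so wake it at once. */
		netif_stop_queue(ndev);
		if (atomic_read(&nvdev->num_outstanding_sends) < 1)
			netif_wake_queue(ndev);
	}
	return ret;
}

/* Completion side, mirroring the netvsc_send_completion() hunk. */
static void example_send_done(struct example_dev *nvdev,
			      struct net_device *ndev)
{
	atomic_dec(&nvdev->num_outstanding_sends);
	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

The wake-after-stop check in the -EAGAIN path closes the window where the last outstanding send completes before netif_stop_queue() runs, which would otherwise leave the queue stopped with no completion left to wake it.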
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index b69c3a4d1e9e..7da85ebd7ac6 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,15 +43,10 @@
 struct net_device_context {
 	/* point back to our device context */
 	struct hv_device *device_ctx;
-	atomic_t avail;
 	struct delayed_work dwork;
 };


-#define PACKET_PAGES_LOWATER  8
-/* Need this many pages to handle worst case fragmented packet */
-#define PACKET_PAGES_HIWATER  (MAX_SKB_FRAGS + 2)
-
 static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@@ -144,18 +139,8 @@ static void netvsc_xmit_completion(void *context)

 	kfree(packet);

-	if (skb) {
-		struct net_device *net = skb->dev;
-		struct net_device_context *net_device_ctx = netdev_priv(net);
-		unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
-
+	if (skb)
 		dev_kfree_skb_any(skb);
-
-		atomic_add(num_pages, &net_device_ctx->avail);
-		if (atomic_read(&net_device_ctx->avail) >=
-				PACKET_PAGES_HIWATER)
-			netif_wake_queue(net);
-	}
 }
160 145
161static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) 146static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -167,8 +152,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)

 	/* Add 1 for skb->data and additional one for RNDIS */
 	num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
-	if (num_pages > atomic_read(&net_device_ctx->avail))
-		return NETDEV_TX_BUSY;

 	/* Allocate a netvsc packet based on # of frags. */
 	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -218,10 +201,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	if (ret == 0) {
 		net->stats.tx_bytes += skb->len;
 		net->stats.tx_packets++;
-
-		atomic_sub(num_pages, &net_device_ctx->avail);
-		if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
-			netif_stop_queue(net);
 	} else {
 		/* we are shutting down or bus overloaded, just drop packet */
 		net->stats.tx_dropped++;
@@ -391,7 +370,6 @@ static int netvsc_probe(struct hv_device *dev,

 	net_device_ctx = netdev_priv(net);
 	net_device_ctx->device_ctx = dev;
-	atomic_set(&net_device_ctx->avail, ring_size);
 	hv_set_drvdata(dev, net);
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
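
For contrast, the netvsc_drv.c hunks delete the page-budget flow control that the queue-based scheme replaces. Reconstructed from the removed lines (condensed; old_style_xmit is an illustrative name and the elided body is the packet setup), every transmit charged its worst-case page count against a per-device avail budget seeded with ring_size; completions refunded the pages via atomic_add() and woke the queue once avail climbed back past PACKET_PAGES_HIWATER. Counting pages only approximates actual ring-buffer occupancy, which is presumably why it gives way to reacting to the host's real -EAGAIN:

/* Reconstruction of the removed scheme (simplified, for comparison only). */
#define PACKET_PAGES_LOWATER	8
/* Need this many pages to handle worst case fragmented packet */
#define PACKET_PAGES_HIWATER	(MAX_SKB_FRAGS + 2)

static int old_style_xmit(struct net_device *net, struct sk_buff *skb)
{
	struct net_device_context *ctx = netdev_priv(net);
	/* skb->data plus one extra page for the RNDIS header */
	unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;

	if (num_pages > atomic_read(&ctx->avail))
		return NETDEV_TX_BUSY;	/* core requeues the skb */

	/* ... build and post the packet as in netvsc_start_xmit() ... */

	atomic_sub(num_pages, &ctx->avail);
	if (atomic_read(&ctx->avail) < PACKET_PAGES_LOWATER)
		netif_stop_queue(net);
	return NETDEV_TX_OK;
}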