author		Bernd Eckstein <3erndeckstein@gmail.com>	2019-05-20 11:31:09 -0400
committer	David S. Miller <davem@davemloft.net>	2019-05-21 16:10:52 -0400
commit		94d250fae48e6f873d8362308f5c4d02cd1b1fd2 (patch)
tree		15785936b3f238629f44947280edfe7362d42b41 /drivers/net/usb
parent		af8f3fb7fb077c9df9fed97113a031e792163def (diff)
usbnet: ipheth: fix race condition
Fix a race condition in ipheth.c that can lead to slow performance.

Bug: In ipheth_tx(), netif_wake_queue() may be called from the callback ipheth_sndbulk_callback() _before_ netif_stop_queue() is called. When this happens, the queue stays stopped longer than necessary, reducing network performance.

Fix: Move netif_stop_queue() in front of usb_submit_urb(). Now the order is always correct. In case usb_submit_urb() fails, the queue is woken up again, as the callback will not fire.

Testing: This race condition is usually not noticeable, as it has to occur very frequently to slow down the network. The callback from the USB core is normally triggered slowly enough that the situation does not arise. However, on Ubuntu Linux running in VMware Workstation on Windows 10, we lose the race quite often and the following speedup can be observed:

Without this patch: Download: 4.10 Mbit/s, Upload: 4.01 Mbit/s
With this patch: Download: 36.23 Mbit/s, Upload: 17.61 Mbit/s

Signed-off-by: Oliver Zweigle <Oliver.Zweigle@faro.com>
Signed-off-by: Bernd Eckstein <3ernd.Eckstein@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/usb')
-rw-r--r--	drivers/net/usb/ipheth.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index c247aed2dceb..8c01fbf68a89 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -383,17 +383,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
 			  dev);
 	dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
+	netif_stop_queue(net);
 	retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
 	if (retval) {
 		dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
 			__func__, retval);
 		dev->net->stats.tx_errors++;
 		dev_kfree_skb_any(skb);
+		netif_wake_queue(net);
 	} else {
 		dev->net->stats.tx_packets++;
 		dev->net->stats.tx_bytes += skb->len;
 		dev_consume_skb_any(skb);
-		netif_stop_queue(net);
 	}
 
 	return NETDEV_TX_OK;
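
For context, the completion handler that re-wakes the queue looks roughly like the sketch below. This is a simplified approximation of ipheth_sndbulk_callback() in this version of ipheth.c, not a verbatim copy; the status checks and locals may differ in the real driver. The point it illustrates: once usb_submit_urb() succeeds, the USB core may invoke this callback at any moment, so netif_stop_queue() must already have run in ipheth_tx().

/*
 * Simplified sketch of the TX completion callback (assumed shape of
 * ipheth_sndbulk_callback(); details may differ from the real driver).
 */
static void ipheth_sndbulk_callback(struct urb *urb)
{
	struct ipheth_device *dev = urb->context;
	int status = urb->status;

	if (status != 0 && status != -ENOENT &&
	    status != -ECONNRESET && status != -ESHUTDOWN)
		dev_err(&dev->intf->dev, "%s: urb status: %d\n",
			__func__, status);

	/* Before the patch, this wake-up could run before the
	 * netif_stop_queue() at the end of ipheth_tx()'s success
	 * branch, leaving the queue needlessly stopped. */
	netif_wake_queue(dev->net);
}

With netif_stop_queue() moved ahead of usb_submit_urb(), this wake-up can only ever re-enable a queue that has already been stopped, so the worst case is a brief stop/start cycle rather than a queue that stays stalled until the next transmit.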