diff options
author | hayeswang <hayeswang@realtek.com> | 2014-03-06 22:04:36 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-03-07 16:24:28 -0500 |
commit | 0c3121fcf10da24dfd667c5bf8d71fcfa261599c (patch) | |
tree | 05078c6e2f4dc6781f9a6622b59c9efc9869bcb2 /drivers/net/usb | |
parent | 21949ab7df0d78b575c87c2e70192b487fd37511 (diff) |
r8152: up the priority of the transmission
Move tx_bottom() from the delayed_work to the tasklet. This balances
the rx and tx handling. If the device is in runtime suspend when a
tx packet arrives, wake the device up before transmitting.
Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/usb')
-rw-r--r-- | drivers/net/usb/r8152.c | 45 |
1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 00b3192568fe..f1eaa18825ab 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -447,6 +447,7 @@ enum rtl8152_flags { | |||
447 | RTL8152_LINK_CHG, | 447 | RTL8152_LINK_CHG, |
448 | SELECTIVE_SUSPEND, | 448 | SELECTIVE_SUSPEND, |
449 | PHY_RESET, | 449 | PHY_RESET, |
450 | SCHEDULE_TASKLET, | ||
450 | }; | 451 | }; |
451 | 452 | ||
452 | /* Define these values to match your device */ | 453 | /* Define these values to match your device */ |
@@ -1071,7 +1072,7 @@ static void write_bulk_callback(struct urb *urb) | |||
1071 | return; | 1072 | return; |
1072 | 1073 | ||
1073 | if (!skb_queue_empty(&tp->tx_queue)) | 1074 | if (!skb_queue_empty(&tp->tx_queue)) |
1074 | schedule_delayed_work(&tp->schedule, 0); | 1075 | tasklet_schedule(&tp->tl); |
1075 | } | 1076 | } |
1076 | 1077 | ||
1077 | static void intr_callback(struct urb *urb) | 1078 | static void intr_callback(struct urb *urb) |
@@ -1335,9 +1336,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) | |||
1335 | u8 *tx_data; | 1336 | u8 *tx_data; |
1336 | 1337 | ||
1337 | __skb_queue_head_init(&skb_head); | 1338 | __skb_queue_head_init(&skb_head); |
1338 | spin_lock_bh(&tx_queue->lock); | 1339 | spin_lock(&tx_queue->lock); |
1339 | skb_queue_splice_init(tx_queue, &skb_head); | 1340 | skb_queue_splice_init(tx_queue, &skb_head); |
1340 | spin_unlock_bh(&tx_queue->lock); | 1341 | spin_unlock(&tx_queue->lock); |
1341 | 1342 | ||
1342 | tx_data = agg->head; | 1343 | tx_data = agg->head; |
1343 | agg->skb_num = agg->skb_len = 0; | 1344 | agg->skb_num = agg->skb_len = 0; |
@@ -1374,20 +1375,20 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) | |||
1374 | } | 1375 | } |
1375 | 1376 | ||
1376 | if (!skb_queue_empty(&skb_head)) { | 1377 | if (!skb_queue_empty(&skb_head)) { |
1377 | spin_lock_bh(&tx_queue->lock); | 1378 | spin_lock(&tx_queue->lock); |
1378 | skb_queue_splice(&skb_head, tx_queue); | 1379 | skb_queue_splice(&skb_head, tx_queue); |
1379 | spin_unlock_bh(&tx_queue->lock); | 1380 | spin_unlock(&tx_queue->lock); |
1380 | } | 1381 | } |
1381 | 1382 | ||
1382 | netif_tx_lock_bh(tp->netdev); | 1383 | netif_tx_lock(tp->netdev); |
1383 | 1384 | ||
1384 | if (netif_queue_stopped(tp->netdev) && | 1385 | if (netif_queue_stopped(tp->netdev) && |
1385 | skb_queue_len(&tp->tx_queue) < tp->tx_qlen) | 1386 | skb_queue_len(&tp->tx_queue) < tp->tx_qlen) |
1386 | netif_wake_queue(tp->netdev); | 1387 | netif_wake_queue(tp->netdev); |
1387 | 1388 | ||
1388 | netif_tx_unlock_bh(tp->netdev); | 1389 | netif_tx_unlock(tp->netdev); |
1389 | 1390 | ||
1390 | ret = usb_autopm_get_interface(tp->intf); | 1391 | ret = usb_autopm_get_interface_async(tp->intf); |
1391 | if (ret < 0) | 1392 | if (ret < 0) |
1392 | goto out_tx_fill; | 1393 | goto out_tx_fill; |
1393 | 1394 | ||
@@ -1395,9 +1396,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) | |||
1395 | agg->head, (int)(tx_data - (u8 *)agg->head), | 1396 | agg->head, (int)(tx_data - (u8 *)agg->head), |
1396 | (usb_complete_t)write_bulk_callback, agg); | 1397 | (usb_complete_t)write_bulk_callback, agg); |
1397 | 1398 | ||
1398 | ret = usb_submit_urb(agg->urb, GFP_KERNEL); | 1399 | ret = usb_submit_urb(agg->urb, GFP_ATOMIC); |
1399 | if (ret < 0) | 1400 | if (ret < 0) |
1400 | usb_autopm_put_interface(tp->intf); | 1401 | usb_autopm_put_interface_async(tp->intf); |
1401 | 1402 | ||
1402 | out_tx_fill: | 1403 | out_tx_fill: |
1403 | return ret; | 1404 | return ret; |
@@ -1535,6 +1536,7 @@ static void bottom_half(unsigned long data) | |||
1535 | return; | 1536 | return; |
1536 | 1537 | ||
1537 | rx_bottom(tp); | 1538 | rx_bottom(tp); |
1539 | tx_bottom(tp); | ||
1538 | } | 1540 | } |
1539 | 1541 | ||
1540 | static | 1542 | static |
@@ -1630,7 +1632,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) | |||
1630 | } | 1632 | } |
1631 | 1633 | ||
1632 | static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, | 1634 | static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, |
1633 | struct net_device *netdev) | 1635 | struct net_device *netdev) |
1634 | { | 1636 | { |
1635 | struct r8152 *tp = netdev_priv(netdev); | 1637 | struct r8152 *tp = netdev_priv(netdev); |
1636 | 1638 | ||
@@ -1638,13 +1640,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, | |||
1638 | 1640 | ||
1639 | skb_queue_tail(&tp->tx_queue, skb); | 1641 | skb_queue_tail(&tp->tx_queue, skb); |
1640 | 1642 | ||
1641 | if (list_empty(&tp->tx_free) && | 1643 | if (!list_empty(&tp->tx_free)) { |
1642 | skb_queue_len(&tp->tx_queue) > tp->tx_qlen) | 1644 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
1645 | set_bit(SCHEDULE_TASKLET, &tp->flags); | ||
1646 | schedule_delayed_work(&tp->schedule, 0); | ||
1647 | } else { | ||
1648 | usb_mark_last_busy(tp->udev); | ||
1649 | tasklet_schedule(&tp->tl); | ||
1650 | } | ||
1651 | } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) | ||
1643 | netif_stop_queue(netdev); | 1652 | netif_stop_queue(netdev); |
1644 | 1653 | ||
1645 | if (!list_empty(&tp->tx_free)) | ||
1646 | schedule_delayed_work(&tp->schedule, 0); | ||
1647 | |||
1648 | return NETDEV_TX_OK; | 1654 | return NETDEV_TX_OK; |
1649 | } | 1655 | } |
1650 | 1656 | ||
@@ -2523,8 +2529,11 @@ static void rtl_work_func_t(struct work_struct *work) | |||
2523 | if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) | 2529 | if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) |
2524 | _rtl8152_set_rx_mode(tp->netdev); | 2530 | _rtl8152_set_rx_mode(tp->netdev); |
2525 | 2531 | ||
2526 | if (tp->speed & LINK_STATUS) | 2532 | if (test_bit(SCHEDULE_TASKLET, &tp->flags) && |
2527 | tx_bottom(tp); | 2533 | (tp->speed & LINK_STATUS)) { |
2534 | clear_bit(SCHEDULE_TASKLET, &tp->flags); | ||
2535 | tasklet_schedule(&tp->tl); | ||
2536 | } | ||
2528 | 2537 | ||
2529 | if (test_bit(PHY_RESET, &tp->flags)) | 2538 | if (test_bit(PHY_RESET, &tp->flags)) |
2530 | rtl_phy_reset(tp); | 2539 | rtl_phy_reset(tp); |